diff --git a/.gitignore b/.gitignore index 4002f4168..34494cd8d 100644 --- a/.gitignore +++ b/.gitignore @@ -16,7 +16,7 @@ *.out # GraphQL generated output -pkg/models/generated_*.go +internal/api/generated_*.go ui/v2.5/src/core/generated-*.tsx #### diff --git a/Makefile b/Makefile index 0bfd11308..406d2a22d 100644 --- a/Makefile +++ b/Makefile @@ -162,6 +162,10 @@ generate-frontend: generate-backend: touch-ui go generate -mod=vendor ./cmd/stash +.PHONY: generate-dataloaders +generate-dataloaders: + go generate -mod=vendor ./internal/api/loaders + # Regenerates stash-box client files .PHONY: generate-stash-box-client generate-stash-box-client: diff --git a/go.mod b/go.mod index d6ccb07f6..4a8f12197 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a github.com/jmoiron/sqlx v1.3.1 github.com/json-iterator/go v1.1.12 - github.com/mattn/go-sqlite3 v1.14.6 + github.com/mattn/go-sqlite3 v1.14.7 github.com/natefinch/pie v0.0.0-20170715172608-9a0d72014007 github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 github.com/remeh/sizedwaitgroup v1.0.0 @@ -36,17 +36,18 @@ require ( github.com/vektra/mockery/v2 v2.10.0 golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064 golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb - golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9 - golang.org/x/sys v0.0.0-20220329152356-43be30ef3008 + golang.org/x/net v0.0.0-20220722155237-a158d28d115b + golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 golang.org/x/text v0.3.7 - golang.org/x/tools v0.1.10 // indirect + golang.org/x/tools v0.1.12 // indirect gopkg.in/sourcemap.v1 v1.0.5 // indirect gopkg.in/yaml.v2 v2.4.0 ) require ( github.com/asticode/go-astisub v0.20.0 + github.com/doug-martin/goqu/v9 v9.18.0 github.com/go-chi/httplog v0.2.1 github.com/go-toast/toast v0.0.0-20190211030409-01e6764cf0a4 github.com/hashicorp/golang-lru v0.5.4 @@ -55,7 +56,9 @@ require ( github.com/lucasb-eyer/go-colorful v1.2.0 github.com/spf13/cast v1.4.1 github.com/vearutop/statigz v1.1.6 + github.com/vektah/dataloaden v0.3.0 github.com/vektah/gqlparser/v2 v2.4.1 + gopkg.in/guregu/null.v4 v4.0.0 ) require ( @@ -98,8 +101,7 @@ require ( github.com/tidwall/match v1.1.1 // indirect github.com/urfave/cli/v2 v2.4.0 // indirect go.uber.org/atomic v1.7.0 // indirect - golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) diff --git a/go.sum b/go.sum index 43ca36369..06230c953 100644 --- a/go.sum +++ b/go.sum @@ -65,6 +65,8 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= +github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Microsoft/go-winio v0.4.16/go.mod 
h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -206,6 +208,8 @@ github.com/docker/docker v17.12.0-ce-rc1.0.20210128214336-420b1d36250f+incompati github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/doug-martin/goqu/v9 v9.18.0 h1:/6bcuEtAe6nsSMVK/M+fOiXUNfyFF3yYtE07DBPFMYY= +github.com/doug-martin/goqu/v9 v9.18.0/go.mod h1:nf0Wc2/hV3gYK9LiyqIrzBEVGlI8qW3GuDCEobC4wBQ= github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= @@ -248,8 +252,9 @@ github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-toast/toast v0.0.0-20190211030409-01e6764cf0a4 h1:qZNfIGkIANxGv/OqtnntR4DfOY2+BgwR60cAcu/i3SE= github.com/go-toast/toast v0.0.0-20190211030409-01e6764cf0a4/go.mod h1:kW3HQ4UdaAyrUCSSDR4xUzBKW6O2iA4uHhk7AtyYp10= @@ -535,8 +540,9 @@ github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E= github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.1 h1:6VXZrLU0jHBYyAqrSPa+MgPfnSvTPuMgK+k0o5kVFWo= +github.com/lib/pq v1.10.1/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/logrusorgru/aurora/v3 v3.0.0/go.mod h1:vsR12bk5grlLvLXAYrBsb5Oc/N+LxAlxggSjiwMnCUc= github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= @@ -570,8 +576,9 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg= github.com/mattn/go-sqlite3 v1.14.6/go.mod 
h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.7 h1:fxWBnXkxfM6sRiuH3bqJ4CfzZojMOLVc0UTsTglEghA= +github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= @@ -746,6 +753,8 @@ github.com/urfave/cli/v2 v2.4.0 h1:m2pxjjDFgDxSPtO8WSdbndj17Wu2y8vOT86wE/tjr+I= github.com/urfave/cli/v2 v2.4.0/go.mod h1:NX9W0zmTvedE5oDoOMs2RTC8RvdK98NTYZE5LbaEYPg= github.com/vearutop/statigz v1.1.6 h1:si1zvulh/6P4S/SjFticuKQ8/EgQISglaRuycj8PWso= github.com/vearutop/statigz v1.1.6/go.mod h1:czAv7iXgPv/s+xsgXpVEhhD0NSOQ4wZPgmM/n7LANDI= +github.com/vektah/dataloaden v0.3.0 h1:ZfVN2QD6swgvp+tDqdH/OIT/wu3Dhu0cus0k5gIZS84= +github.com/vektah/dataloaden v0.3.0/go.mod h1:/HUdMve7rvxZma+2ZELQeNh88+003LL7Pf/CZ089j8U= github.com/vektah/gqlparser/v2 v2.4.0/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0= github.com/vektah/gqlparser/v2 v2.4.1 h1:QOyEn8DAPMUMARGMeshKDkDgNmVoEaEGiDB0uWxcSlQ= github.com/vektah/gqlparser/v2 v2.4.1/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0= @@ -764,6 +773,7 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= @@ -856,8 +866,9 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -913,8 +924,8 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9 h1:0qxwC5n+ttVOINCBeRHO0nq9X7uy8SDsPoi5OaCdIEI= -golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -947,6 +958,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1040,8 +1052,10 @@ golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220329152356-43be30ef3008 h1:pq9pwoi2rjLWvmiVser/lIOgiyA3fli4M+RfGVMA7nE= -golang.org/x/sys v0.0.0-20220329152356-43be30ef3008/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 h1:v1W7bwXHsnLLloWYTVEdvGvA7BHMeBYsPcF0GLDxIRs= +golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1071,6 +1085,7 @@ golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190515012406-7d7faa4812bd/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -1128,14 +1143,14 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -1300,6 +1315,8 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8X gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/guregu/null.v4 v4.0.0 h1:1Wm3S1WEA2I26Kq+6vcW+w0gcDo44YKYD7YIEJNHDjg= +gopkg.in/guregu/null.v4 v4.0.0/go.mod h1:YoQhUrADuG3i9WqesrCmpNRwm1ypAgSHYqoOcTu/JrI= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= diff --git a/gqlgen.yml b/gqlgen.yml index 8762177d3..9e419a002 100644 --- a/gqlgen.yml +++ b/gqlgen.yml @@ -4,46 +4,122 @@ schema: - "graphql/schema/types/*.graphql" - "graphql/schema/*.graphql" exec: - filename: pkg/models/generated_exec.go + filename: internal/api/generated_exec.go model: - filename: pkg/models/generated_models.go + filename: internal/api/generated_models.go resolver: filename: internal/api/resolver.go type: Resolver struct_tag: gqlgen +autobind: + - github.com/stashapp/stash/pkg/models + - github.com/stashapp/stash/pkg/plugin + - github.com/stashapp/stash/pkg/scraper + - github.com/stashapp/stash/internal/identify + - github.com/stashapp/stash/internal/dlna + - github.com/stashapp/stash/pkg/scraper/stashbox + models: # 
Scalars Timestamp: model: github.com/stashapp/stash/pkg/models.Timestamp - # Objects - Gallery: - model: github.com/stashapp/stash/pkg/models.Gallery + Int64: + model: github.com/stashapp/stash/pkg/models.Int64 + # define to force resolvers Image: model: github.com/stashapp/stash/pkg/models.Image - ImageFileType: - model: github.com/stashapp/stash/pkg/models.ImageFileType - Performer: - model: github.com/stashapp/stash/pkg/models.Performer - Scene: - model: github.com/stashapp/stash/pkg/models.Scene - SceneMarker: - model: github.com/stashapp/stash/pkg/models.SceneMarker - ScrapedItem: - model: github.com/stashapp/stash/pkg/models.ScrapedItem - Studio: - model: github.com/stashapp/stash/pkg/models.Studio - Movie: - model: github.com/stashapp/stash/pkg/models.Movie - Tag: - model: github.com/stashapp/stash/pkg/models.Tag - SceneFileType: - model: github.com/stashapp/stash/pkg/models.SceneFileType - SavedFilter: - model: github.com/stashapp/stash/pkg/models.SavedFilter - StashID: + fields: + title: + resolver: true + # autobind on config causes generation issues + StashConfig: + model: github.com/stashapp/stash/internal/manager/config.StashConfig + StashConfigInput: + model: github.com/stashapp/stash/internal/manager/config.StashConfigInput + StashBoxInput: + model: github.com/stashapp/stash/internal/manager/config.StashBoxInput + ConfigImageLightboxResult: + model: github.com/stashapp/stash/internal/manager/config.ConfigImageLightboxResult + ImageLightboxDisplayMode: + model: github.com/stashapp/stash/internal/manager/config.ImageLightboxDisplayMode + ImageLightboxScrollMode: + model: github.com/stashapp/stash/internal/manager/config.ImageLightboxScrollMode + ConfigDisableDropdownCreate: + model: github.com/stashapp/stash/internal/manager/config.ConfigDisableDropdownCreate + ScanMetadataOptions: + model: github.com/stashapp/stash/internal/manager/config.ScanMetadataOptions + AutoTagMetadataOptions: + model: github.com/stashapp/stash/internal/manager/config.AutoTagMetadataOptions + SceneParserInput: + model: github.com/stashapp/stash/internal/manager.SceneParserInput + SceneParserResult: + model: github.com/stashapp/stash/internal/manager.SceneParserResult + SceneMovieID: + model: github.com/stashapp/stash/internal/manager.SceneMovieID + SystemStatus: + model: github.com/stashapp/stash/internal/manager.SystemStatus + SystemStatusEnum: + model: github.com/stashapp/stash/internal/manager.SystemStatusEnum + ImportDuplicateEnum: + model: github.com/stashapp/stash/internal/manager.ImportDuplicateEnum + SetupInput: + model: github.com/stashapp/stash/internal/manager.SetupInput + MigrateInput: + model: github.com/stashapp/stash/internal/manager.MigrateInput + ScanMetadataInput: + model: github.com/stashapp/stash/internal/manager.ScanMetadataInput + GenerateMetadataInput: + model: github.com/stashapp/stash/internal/manager.GenerateMetadataInput + GeneratePreviewOptionsInput: + model: github.com/stashapp/stash/internal/manager.GeneratePreviewOptionsInput + AutoTagMetadataInput: + model: github.com/stashapp/stash/internal/manager.AutoTagMetadataInput + CleanMetadataInput: + model: github.com/stashapp/stash/internal/manager.CleanMetadataInput + StashBoxBatchPerformerTagInput: + model: github.com/stashapp/stash/internal/manager.StashBoxBatchPerformerTagInput + SceneStreamEndpoint: + model: github.com/stashapp/stash/internal/manager.SceneStreamEndpoint + ExportObjectTypeInput: + model: github.com/stashapp/stash/internal/manager.ExportObjectTypeInput + ExportObjectsInput: + model: 
github.com/stashapp/stash/internal/manager.ExportObjectsInput + ImportObjectsInput: + model: github.com/stashapp/stash/internal/manager.ImportObjectsInput + ScanMetaDataFilterInput: + model: github.com/stashapp/stash/internal/manager.ScanMetaDataFilterInput + # renamed types + BulkUpdateIdMode: + model: github.com/stashapp/stash/pkg/models.RelationshipUpdateMode + DLNAStatus: + model: github.com/stashapp/stash/internal/dlna.Status + DLNAIP: + model: github.com/stashapp/stash/internal/dlna.Dlnaip + IdentifySource: + model: github.com/stashapp/stash/internal/identify.Source + IdentifyMetadataTaskOptions: + model: github.com/stashapp/stash/internal/identify.Options + IdentifyMetadataInput: + model: github.com/stashapp/stash/internal/identify.Options + IdentifyMetadataOptions: + model: github.com/stashapp/stash/internal/identify.MetadataOptions + IdentifyFieldOptions: + model: github.com/stashapp/stash/internal/identify.FieldOptions + IdentifyFieldStrategy: + model: github.com/stashapp/stash/internal/identify.FieldStrategy + ScraperSource: + model: github.com/stashapp/stash/pkg/scraper.Source + # rebind inputs to types + StashIDInput: model: github.com/stashapp/stash/pkg/models.StashID - SceneCaption: - model: github.com/stashapp/stash/pkg/models.SceneCaption - + IdentifySourceInput: + model: github.com/stashapp/stash/internal/identify.Source + IdentifyFieldOptionsInput: + model: github.com/stashapp/stash/internal/identify.FieldOptions + IdentifyMetadataOptionsInput: + model: github.com/stashapp/stash/internal/identify.MetadataOptions + ScraperSourceInput: + model: github.com/stashapp/stash/pkg/scraper.Source + diff --git a/graphql/documents/data/file.graphql b/graphql/documents/data/file.graphql new file mode 100644 index 000000000..108025ed5 --- /dev/null +++ b/graphql/documents/data/file.graphql @@ -0,0 +1,40 @@ +fragment FolderData on Folder { + id + path +} + +fragment VideoFileData on VideoFile { + path + size + duration + video_codec + audio_codec + width + height + frame_rate + bit_rate + fingerprints { + type + value + } +} + +fragment ImageFileData on ImageFile { + path + size + width + height + fingerprints { + type + value + } +} + +fragment GalleryFileData on GalleryFile { + path + size + fingerprints { + type + value + } +} \ No newline at end of file diff --git a/graphql/documents/data/gallery-slim.graphql b/graphql/documents/data/gallery-slim.graphql index c408f8deb..ea98d30f0 100644 --- a/graphql/documents/data/gallery-slim.graphql +++ b/graphql/documents/data/gallery-slim.graphql @@ -1,19 +1,21 @@ fragment SlimGalleryData on Gallery { id - checksum - path title date url details rating organized + files { + ...GalleryFileData + } + folder { + ...FolderData + } image_count cover { - file { - size - width - height + files { + ...ImageFileData } paths { @@ -37,8 +39,6 @@ fragment SlimGalleryData on Gallery { image_path } scenes { - id - title - path + ...SlimSceneData } } diff --git a/graphql/documents/data/gallery.graphql b/graphql/documents/data/gallery.graphql index 2bcd8e352..9d43244e9 100644 --- a/graphql/documents/data/gallery.graphql +++ b/graphql/documents/data/gallery.graphql @@ -1,7 +1,5 @@ fragment GalleryData on Gallery { id - checksum - path created_at updated_at title @@ -10,6 +8,14 @@ fragment GalleryData on Gallery { details rating organized + + files { + ...GalleryFileData + } + folder { + ...FolderData + } + images { ...SlimImageData } diff --git a/graphql/documents/data/image-slim.graphql b/graphql/documents/data/image-slim.graphql index b1c066ee2..37b0bc86f 
100644 --- a/graphql/documents/data/image-slim.graphql +++ b/graphql/documents/data/image-slim.graphql @@ -1,16 +1,12 @@ fragment SlimImageData on Image { id - checksum title rating organized o_counter - path - file { - size - width - height + files { + ...ImageFileData } paths { @@ -20,8 +16,13 @@ fragment SlimImageData on Image { galleries { id - path title + files { + path + } + folder { + path + } } studio { diff --git a/graphql/documents/data/image.graphql b/graphql/documents/data/image.graphql index cb71b0281..4fe1f0d0e 100644 --- a/graphql/documents/data/image.graphql +++ b/graphql/documents/data/image.graphql @@ -1,18 +1,14 @@ fragment ImageData on Image { id - checksum title rating organized o_counter - path created_at updated_at - file { - size - width - height + files { + ...ImageFileData } paths { diff --git a/graphql/documents/data/scene-slim.graphql b/graphql/documents/data/scene-slim.graphql index c3d759e61..a199fde90 100644 --- a/graphql/documents/data/scene-slim.graphql +++ b/graphql/documents/data/scene-slim.graphql @@ -1,7 +1,5 @@ fragment SlimSceneData on Scene { id - checksum - oshash title details url @@ -9,24 +7,11 @@ fragment SlimSceneData on Scene { rating o_counter organized - path - phash interactive interactive_speed - captions { - language_code - caption_type - } - file { - size - duration - video_codec - audio_codec - width - height - framerate - bitrate + files { + ...VideoFileData } paths { diff --git a/graphql/documents/data/scene.graphql b/graphql/documents/data/scene.graphql index 0cbd73468..13a672900 100644 --- a/graphql/documents/data/scene.graphql +++ b/graphql/documents/data/scene.graphql @@ -1,7 +1,5 @@ fragment SceneData on Scene { id - checksum - oshash title details url @@ -9,8 +7,6 @@ fragment SceneData on Scene { rating o_counter organized - path - phash interactive interactive_speed captions { @@ -20,15 +16,8 @@ fragment SceneData on Scene { created_at updated_at - file { - size - duration - video_codec - audio_codec - width - height - framerate - bitrate + files { + ...VideoFileData } paths { diff --git a/graphql/documents/queries/scene.graphql b/graphql/documents/queries/scene.graphql index 17e4e1d61..c34222b66 100644 --- a/graphql/documents/queries/scene.graphql +++ b/graphql/documents/queries/scene.graphql @@ -4,7 +4,7 @@ query FindScenes($filter: FindFilterType, $scene_filter: SceneFilterType, $scene filesize duration scenes { - ...SceneData + ...SlimSceneData } } } diff --git a/graphql/schema/types/file.graphql b/graphql/schema/types/file.graphql new file mode 100644 index 000000000..2493b622f --- /dev/null +++ b/graphql/schema/types/file.graphql @@ -0,0 +1,97 @@ +type Fingerprint { + type: String! + value: String! +} + +type Folder { + id: ID! + path: String! + + parent_folder_id: ID + zip_file_id: ID + + mod_time: Time! + + created_at: Time! + updated_at: Time! +} + +interface BaseFile { + id: ID! + path: String! + basename: String! + + parent_folder_id: ID! + zip_file_id: ID + + mod_time: Time! + size: Int64! + + fingerprints: [Fingerprint!]! + + created_at: Time! + updated_at: Time! +} + +type VideoFile implements BaseFile { + id: ID! + path: String! + basename: String! + + parent_folder_id: ID! + zip_file_id: ID + + mod_time: Time! + size: Int64! + + fingerprints: [Fingerprint!]! + + format: String! + width: Int! + height: Int! + duration: Float! + video_codec: String! + audio_codec: String! + frame_rate: Float! + bit_rate: Int! + + created_at: Time! + updated_at: Time! +} + +type ImageFile implements BaseFile { + id: ID! 
+ path: String! + basename: String! + + parent_folder_id: ID! + zip_file_id: ID + + mod_time: Time! + size: Int64! + + fingerprints: [Fingerprint!]! + + width: Int! + height: Int! + + created_at: Time! + updated_at: Time! +} + +type GalleryFile implements BaseFile { + id: ID! + path: String! + basename: String! + + parent_folder_id: ID! + zip_file_id: ID + + mod_time: Time! + size: Int64! + + fingerprints: [Fingerprint!]! + + created_at: Time! + updated_at: Time! +} \ No newline at end of file diff --git a/graphql/schema/types/filters.graphql b/graphql/schema/types/filters.graphql index a3dbb5287..850d46ad9 100644 --- a/graphql/schema/types/filters.graphql +++ b/graphql/schema/types/filters.graphql @@ -132,6 +132,8 @@ input SceneFilterType { phash: StringCriterionInput """Filter by path""" path: StringCriterionInput + """Filter by file count""" + file_count: IntCriterionInput """Filter by rating""" rating: IntCriterionInput """Filter by organized""" @@ -239,6 +241,8 @@ input GalleryFilterType { checksum: StringCriterionInput """Filter by path""" path: StringCriterionInput + """Filter by zip-file count""" + file_count: IntCriterionInput """Filter to only include galleries missing this property""" is_missing: String """Filter to include/exclude galleries that were created from zip""" @@ -327,6 +331,8 @@ input ImageFilterType { checksum: StringCriterionInput """Filter by path""" path: StringCriterionInput + """Filter by file count""" + file_count: IntCriterionInput """Filter by rating""" rating: IntCriterionInput """Filter by organized""" diff --git a/graphql/schema/types/gallery.graphql b/graphql/schema/types/gallery.graphql index a06c6a512..a129448ce 100644 --- a/graphql/schema/types/gallery.graphql +++ b/graphql/schema/types/gallery.graphql @@ -1,8 +1,8 @@ """Gallery type""" type Gallery { id: ID! - checksum: String! - path: String + checksum: String! @deprecated(reason: "Use files.fingerprints") + path: String @deprecated(reason: "Use files.path") title: String url: String date: String @@ -11,7 +11,10 @@ type Gallery { organized: Boolean! created_at: Time! updated_at: Time! - file_mod_time: Time + file_mod_time: Time @deprecated(reason: "Use files.mod_time") + + files: [GalleryFile!]! + folder: Folder scenes: [Scene!]! studio: Studio @@ -24,12 +27,6 @@ type Gallery { cover: Image } -type GalleryFilesType { - index: Int! - name: String - path: String -} - input GalleryCreateInput { title: String! url: String diff --git a/graphql/schema/types/image.graphql b/graphql/schema/types/image.graphql index da3b56ee6..3e3af9cef 100644 --- a/graphql/schema/types/image.graphql +++ b/graphql/schema/types/image.graphql @@ -1,16 +1,18 @@ type Image { id: ID! - checksum: String + checksum: String @deprecated(reason: "Use files.fingerprints") title: String rating: Int o_counter: Int organized: Boolean! - path: String! + path: String! @deprecated(reason: "Use files.path") created_at: Time! updated_at: Time! - file_mod_time: Time + + file_mod_time: Time @deprecated(reason: "Use files.mod_time") - file: ImageFileType! # Resolver + file: ImageFileType! @deprecated(reason: "Use files") + files: [ImageFile!]! paths: ImagePathsType! # Resolver galleries: [Gallery!]! @@ -20,9 +22,10 @@ type Image { } type ImageFileType { - size: Int - width: Int - height: Int + mod_time: Time! + size: Int! + width: Int! + height: Int!
} type ImagePathsType { diff --git a/graphql/schema/types/metadata.graphql b/graphql/schema/types/metadata.graphql index d8b94d477..96784ee9d 100644 --- a/graphql/schema/types/metadata.graphql +++ b/graphql/schema/types/metadata.graphql @@ -71,10 +71,19 @@ input ScanMetaDataFilterInput { input ScanMetadataInput { paths: [String!] + # useFileMetadata is deprecated with the new file management system + # if this functionality is desired, then we can make a built in scraper instead. + """Set name, date, details from metadata (if present)""" - useFileMetadata: Boolean + useFileMetadata: Boolean @deprecated(reason: "Not implemented") + + # stripFileExtension is deprecated since we no longer set the title from the + # filename - it is automatically returned if the object has no title. If this + # functionality is desired, then we could make this an option to not include + # the extension in the auto-generated title. + """Strip file extension from title""" - stripFileExtension: Boolean + stripFileExtension: Boolean @deprecated(reason: "Not implemented") """Generate previews during scan""" scanGeneratePreviews: Boolean """Generate image previews during scan""" diff --git a/graphql/schema/types/scalars.graphql b/graphql/schema/types/scalars.graphql index f973887a5..26d21bfba 100644 --- a/graphql/schema/types/scalars.graphql +++ b/graphql/schema/types/scalars.graphql @@ -9,4 +9,6 @@ scalar Timestamp # generic JSON object scalar Map -scalar Any \ No newline at end of file +scalar Any + +scalar Int64 \ No newline at end of file diff --git a/graphql/schema/types/scene.graphql b/graphql/schema/types/scene.graphql index ff405415a..576e9b7f2 100644 --- a/graphql/schema/types/scene.graphql +++ b/graphql/schema/types/scene.graphql @@ -27,15 +27,15 @@ type SceneMovie { scene_index: Int } -type SceneCaption { +type VideoCaption { language_code: String! caption_type: String! } type Scene { id: ID! - checksum: String - oshash: String + checksum: String @deprecated(reason: "Use files.fingerprints") + oshash: String @deprecated(reason: "Use files.fingerprints") title: String details: String url: String @@ -43,16 +43,17 @@ type Scene { rating: Int organized: Boolean! o_counter: Int - path: String! - phash: String + path: String! @deprecated(reason: "Use files.path") + phash: String @deprecated(reason: "Use files.fingerprints") interactive: Boolean! interactive_speed: Int - captions: [SceneCaption!] + captions: [VideoCaption!] created_at: Time! updated_at: Time! file_mod_time: Time - file: SceneFileType! # Resolver + file: SceneFileType! @deprecated(reason: "Use files") + files: [VideoFile!]! paths: ScenePathsType! # Resolver scene_markers: [SceneMarker!]! 
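The schema changes above deprecate per-object scalars such as `path`, `checksum`, `oshash`, and `phash` in favor of the new `files` relationship. A minimal sketch of how a deprecated field could be backfilled from the primary file during the transition, assuming the first file ID returned is the primary one; the function name and the `Base().Path` accessor are assumptions, not code from this diff:

```go
package api

import (
	"context"

	"github.com/stashapp/stash/internal/api/loaders"
	"github.com/stashapp/stash/pkg/models"
)

// scenePrimaryPath backfills the deprecated Scene.path value from the new
// files relationship. Sketch only; this diff does not show this resolver.
func scenePrimaryPath(ctx context.Context, obj *models.Scene) (*string, error) {
	ldrs := loaders.From(ctx)

	// SceneFiles batches every scene-to-file-ID lookup made in the same
	// request window into a single GetManyFileIDs call (see the dataloaders
	// introduced below).
	fileIDs, err := ldrs.SceneFiles.Load(obj.ID)
	if err != nil || len(fileIDs) == 0 {
		return nil, err
	}

	// FileByID batches the file fetches the same way.
	f, err := ldrs.FileByID.Load(fileIDs[0])
	if err != nil {
		return nil, err
	}

	p := f.Base().Path // Base().Path assumed from pkg/file; not shown in this diff
	return &p, nil
}
```

Keeping the deprecated fields resolvable this way lets existing clients migrate to `files` on their own schedule.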
diff --git a/internal/api/changeset_translator.go b/internal/api/changeset_translator.go index e1fc3868a..3dfb4a6a1 100644 --- a/internal/api/changeset_translator.go +++ b/internal/api/changeset_translator.go @@ -3,6 +3,7 @@ package api import ( "context" "database/sql" + "fmt" "strconv" "github.com/99designs/gqlgen/graphql" @@ -89,6 +90,14 @@ func (t changesetTranslator) nullString(value *string, field string) *sql.NullSt return ret } +func (t changesetTranslator) optionalString(value *string, field string) models.OptionalString { + if !t.hasField(field) { + return models.OptionalString{} + } + + return models.NewOptionalStringPtr(value) +} + func (t changesetTranslator) sqliteDate(value *string, field string) *models.SQLiteDate { if !t.hasField(field) { return nil @@ -104,6 +113,21 @@ func (t changesetTranslator) sqliteDate(value *string, field string) *models.SQL return ret } +func (t changesetTranslator) optionalDate(value *string, field string) models.OptionalDate { + if !t.hasField(field) { + return models.OptionalDate{} + } + + if value == nil { + return models.OptionalDate{ + Set: true, + Null: true, + } + } + + return models.NewOptionalDate(models.NewDate(*value)) +} + func (t changesetTranslator) nullInt64(value *int, field string) *sql.NullInt64 { if !t.hasField(field) { return nil @@ -119,6 +143,14 @@ func (t changesetTranslator) nullInt64(value *int, field string) *sql.NullInt64 return ret } +func (t changesetTranslator) optionalInt(value *int, field string) models.OptionalInt { + if !t.hasField(field) { + return models.OptionalInt{} + } + + return models.NewOptionalIntPtr(value) +} + func (t changesetTranslator) nullInt64FromString(value *string, field string) *sql.NullInt64 { if !t.hasField(field) { return nil @@ -134,6 +166,25 @@ func (t changesetTranslator) nullInt64FromString(value *string, field string) *s return ret } +func (t changesetTranslator) optionalIntFromString(value *string, field string) (models.OptionalInt, error) { + if !t.hasField(field) { + return models.OptionalInt{}, nil + } + + if value == nil { + return models.OptionalInt{ + Set: true, + Null: true, + }, nil + } + + vv, err := strconv.Atoi(*value) + if err != nil { + return models.OptionalInt{}, fmt.Errorf("converting %v to int: %w", *value, err) + } + return models.NewOptionalInt(vv), nil +} + func (t changesetTranslator) nullBool(value *bool, field string) *sql.NullBool { if !t.hasField(field) { return nil @@ -148,3 +199,11 @@ func (t changesetTranslator) nullBool(value *bool, field string) *sql.NullBool { return ret } + +func (t changesetTranslator) optionalBool(value *bool, field string) models.OptionalBool { + if !t.hasField(field) { + return models.OptionalBool{} + } + + return models.NewOptionalBoolPtr(value) +} diff --git a/internal/api/loaders/dataloaders.go b/internal/api/loaders/dataloaders.go new file mode 100644 index 000000000..30b865632 --- /dev/null +++ b/internal/api/loaders/dataloaders.go @@ -0,0 +1,261 @@ +//go:generate go run -mod=vendor github.com/vektah/dataloaden SceneLoader int *github.com/stashapp/stash/pkg/models.Scene +//go:generate go run -mod=vendor github.com/vektah/dataloaden GalleryLoader int *github.com/stashapp/stash/pkg/models.Gallery +//go:generate go run -mod=vendor github.com/vektah/dataloaden ImageLoader int *github.com/stashapp/stash/pkg/models.Image +//go:generate go run -mod=vendor github.com/vektah/dataloaden PerformerLoader int *github.com/stashapp/stash/pkg/models.Performer +//go:generate go run -mod=vendor github.com/vektah/dataloaden StudioLoader int 
*github.com/stashapp/stash/pkg/models.Studio +//go:generate go run -mod=vendor github.com/vektah/dataloaden TagLoader int *github.com/stashapp/stash/pkg/models.Tag +//go:generate go run -mod=vendor github.com/vektah/dataloaden MovieLoader int *github.com/stashapp/stash/pkg/models.Movie +//go:generate go run -mod=vendor github.com/vektah/dataloaden FileLoader github.com/stashapp/stash/pkg/file.ID github.com/stashapp/stash/pkg/file.File +//go:generate go run -mod=vendor github.com/vektah/dataloaden SceneFileIDsLoader int []github.com/stashapp/stash/pkg/file.ID +//go:generate go run -mod=vendor github.com/vektah/dataloaden ImageFileIDsLoader int []github.com/stashapp/stash/pkg/file.ID +//go:generate go run -mod=vendor github.com/vektah/dataloaden GalleryFileIDsLoader int []github.com/stashapp/stash/pkg/file.ID + +package loaders + +import ( + "context" + "net/http" + "time" + + "github.com/stashapp/stash/internal/manager" + "github.com/stashapp/stash/pkg/file" + "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/txn" +) + +type contextKey struct{ name string } + +var ( + loadersCtxKey = &contextKey{"loaders"} +) + +const ( + wait = 1 * time.Millisecond + maxBatch = 100 +) + +type Loaders struct { + SceneByID *SceneLoader + SceneFiles *SceneFileIDsLoader + ImageFiles *ImageFileIDsLoader + GalleryFiles *GalleryFileIDsLoader + + GalleryByID *GalleryLoader + ImageByID *ImageLoader + PerformerByID *PerformerLoader + StudioByID *StudioLoader + TagByID *TagLoader + MovieByID *MovieLoader + FileByID *FileLoader +} + +type Middleware struct { + DatabaseProvider txn.DatabaseProvider + Repository manager.Repository +} + +func (m Middleware) Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ldrs := Loaders{ + SceneByID: &SceneLoader{ + wait: wait, + maxBatch: maxBatch, + fetch: m.fetchScenes(ctx), + }, + GalleryByID: &GalleryLoader{ + wait: wait, + maxBatch: maxBatch, + fetch: m.fetchGalleries(ctx), + }, + ImageByID: &ImageLoader{ + wait: wait, + maxBatch: maxBatch, + fetch: m.fetchImages(ctx), + }, + PerformerByID: &PerformerLoader{ + wait: wait, + maxBatch: maxBatch, + fetch: m.fetchPerformers(ctx), + }, + StudioByID: &StudioLoader{ + wait: wait, + maxBatch: maxBatch, + fetch: m.fetchStudios(ctx), + }, + TagByID: &TagLoader{ + wait: wait, + maxBatch: maxBatch, + fetch: m.fetchTags(ctx), + }, + MovieByID: &MovieLoader{ + wait: wait, + maxBatch: maxBatch, + fetch: m.fetchMovies(ctx), + }, + FileByID: &FileLoader{ + wait: wait, + maxBatch: maxBatch, + fetch: m.fetchFiles(ctx), + }, + SceneFiles: &SceneFileIDsLoader{ + wait: wait, + maxBatch: maxBatch, + fetch: m.fetchScenesFileIDs(ctx), + }, + ImageFiles: &ImageFileIDsLoader{ + wait: wait, + maxBatch: maxBatch, + fetch: m.fetchImagesFileIDs(ctx), + }, + GalleryFiles: &GalleryFileIDsLoader{ + wait: wait, + maxBatch: maxBatch, + fetch: m.fetchGalleriesFileIDs(ctx), + }, + } + + newCtx := context.WithValue(r.Context(), loadersCtxKey, ldrs) + next.ServeHTTP(w, r.WithContext(newCtx)) + }) +} + +func From(ctx context.Context) Loaders { + return ctx.Value(loadersCtxKey).(Loaders) +} + +func toErrorSlice(err error) []error { + if err != nil { + return []error{err} + } + + return nil +} + +func (m Middleware) withTxn(ctx context.Context, fn func(ctx context.Context) error) error { + return txn.WithDatabase(ctx, m.DatabaseProvider, fn) +} + +func (m Middleware) fetchScenes(ctx context.Context) func(keys []int) ([]*models.Scene, []error) { + return 
func(keys []int) (ret []*models.Scene, errs []error) { + err := m.withTxn(ctx, func(ctx context.Context) error { + var err error + ret, err = m.Repository.Scene.FindMany(ctx, keys) + return err + }) + return ret, toErrorSlice(err) + } +} + +func (m Middleware) fetchImages(ctx context.Context) func(keys []int) ([]*models.Image, []error) { + return func(keys []int) (ret []*models.Image, errs []error) { + err := m.withTxn(ctx, func(ctx context.Context) error { + var err error + ret, err = m.Repository.Image.FindMany(ctx, keys) + return err + }) + + return ret, toErrorSlice(err) + } +} + +func (m Middleware) fetchGalleries(ctx context.Context) func(keys []int) ([]*models.Gallery, []error) { + return func(keys []int) (ret []*models.Gallery, errs []error) { + err := m.withTxn(ctx, func(ctx context.Context) error { + var err error + ret, err = m.Repository.Gallery.FindMany(ctx, keys) + return err + }) + + return ret, toErrorSlice(err) + } +} + +func (m Middleware) fetchPerformers(ctx context.Context) func(keys []int) ([]*models.Performer, []error) { + return func(keys []int) (ret []*models.Performer, errs []error) { + err := m.withTxn(ctx, func(ctx context.Context) error { + var err error + ret, err = m.Repository.Performer.FindMany(ctx, keys) + return err + }) + + return ret, toErrorSlice(err) + } +} + +func (m Middleware) fetchStudios(ctx context.Context) func(keys []int) ([]*models.Studio, []error) { + return func(keys []int) (ret []*models.Studio, errs []error) { + err := m.withTxn(ctx, func(ctx context.Context) error { + var err error + ret, err = m.Repository.Studio.FindMany(ctx, keys) + return err + }) + return ret, toErrorSlice(err) + } +} + +func (m Middleware) fetchTags(ctx context.Context) func(keys []int) ([]*models.Tag, []error) { + return func(keys []int) (ret []*models.Tag, errs []error) { + err := m.withTxn(ctx, func(ctx context.Context) error { + var err error + ret, err = m.Repository.Tag.FindMany(ctx, keys) + return err + }) + return ret, toErrorSlice(err) + } +} + +func (m Middleware) fetchMovies(ctx context.Context) func(keys []int) ([]*models.Movie, []error) { + return func(keys []int) (ret []*models.Movie, errs []error) { + err := m.withTxn(ctx, func(ctx context.Context) error { + var err error + ret, err = m.Repository.Movie.FindMany(ctx, keys) + return err + }) + return ret, toErrorSlice(err) + } +} + +func (m Middleware) fetchFiles(ctx context.Context) func(keys []file.ID) ([]file.File, []error) { + return func(keys []file.ID) (ret []file.File, errs []error) { + err := m.withTxn(ctx, func(ctx context.Context) error { + var err error + ret, err = m.Repository.File.Find(ctx, keys...) 
+ return err + }) + return ret, toErrorSlice(err) + } +} + +func (m Middleware) fetchScenesFileIDs(ctx context.Context) func(keys []int) ([][]file.ID, []error) { + return func(keys []int) (ret [][]file.ID, errs []error) { + err := m.withTxn(ctx, func(ctx context.Context) error { + var err error + ret, err = m.Repository.Scene.GetManyFileIDs(ctx, keys) + return err + }) + return ret, toErrorSlice(err) + } +} + +func (m Middleware) fetchImagesFileIDs(ctx context.Context) func(keys []int) ([][]file.ID, []error) { + return func(keys []int) (ret [][]file.ID, errs []error) { + err := m.withTxn(ctx, func(ctx context.Context) error { + var err error + ret, err = m.Repository.Image.GetManyFileIDs(ctx, keys) + return err + }) + return ret, toErrorSlice(err) + } +} + +func (m Middleware) fetchGalleriesFileIDs(ctx context.Context) func(keys []int) ([][]file.ID, []error) { + return func(keys []int) (ret [][]file.ID, errs []error) { + err := m.withTxn(ctx, func(ctx context.Context) error { + var err error + ret, err = m.Repository.Gallery.GetManyFileIDs(ctx, keys) + return err + }) + return ret, toErrorSlice(err) + } +} diff --git a/internal/api/loaders/fileloader_gen.go b/internal/api/loaders/fileloader_gen.go new file mode 100644 index 000000000..348dcbb7f --- /dev/null +++ b/internal/api/loaders/fileloader_gen.go @@ -0,0 +1,221 @@ +// Code generated by github.com/vektah/dataloaden, DO NOT EDIT. + +package loaders + +import ( + "sync" + "time" + + "github.com/stashapp/stash/pkg/file" +) + +// FileLoaderConfig captures the config to create a new FileLoader +type FileLoaderConfig struct { + // Fetch is a method that provides the data for the loader + Fetch func(keys []file.ID) ([]file.File, []error) + + // Wait is how long wait before sending a batch + Wait time.Duration + + // MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit + MaxBatch int +} + +// NewFileLoader creates a new FileLoader given a fetch, wait, and maxBatch +func NewFileLoader(config FileLoaderConfig) *FileLoader { + return &FileLoader{ + fetch: config.Fetch, + wait: config.Wait, + maxBatch: config.MaxBatch, + } +} + +// FileLoader batches and caches requests +type FileLoader struct { + // this method provides the data for the loader + fetch func(keys []file.ID) ([]file.File, []error) + + // how long to done before sending a batch + wait time.Duration + + // this will limit the maximum number of keys to send in one batch, 0 = no limit + maxBatch int + + // INTERNAL + + // lazily created cache + cache map[file.ID]file.File + + // the current batch. keys will continue to be collected until timeout is hit, + // then everything will be sent to the fetch method and out to the listeners + batch *fileLoaderBatch + + // mutex to prevent races + mu sync.Mutex +} + +type fileLoaderBatch struct { + keys []file.ID + data []file.File + error []error + closing bool + done chan struct{} +} + +// Load a File by key, batching and caching will be applied automatically +func (l *FileLoader) Load(key file.ID) (file.File, error) { + return l.LoadThunk(key)() +} + +// LoadThunk returns a function that when called will block waiting for a File. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. 
+func (l *FileLoader) LoadThunk(key file.ID) func() (file.File, error) { + l.mu.Lock() + if it, ok := l.cache[key]; ok { + l.mu.Unlock() + return func() (file.File, error) { + return it, nil + } + } + if l.batch == nil { + l.batch = &fileLoaderBatch{done: make(chan struct{})} + } + batch := l.batch + pos := batch.keyIndex(l, key) + l.mu.Unlock() + + return func() (file.File, error) { + <-batch.done + + var data file.File + if pos < len(batch.data) { + data = batch.data[pos] + } + + var err error + // its convenient to be able to return a single error for everything + if len(batch.error) == 1 { + err = batch.error[0] + } else if batch.error != nil { + err = batch.error[pos] + } + + if err == nil { + l.mu.Lock() + l.unsafeSet(key, data) + l.mu.Unlock() + } + + return data, err + } +} + +// LoadAll fetches many keys at once. It will be broken into appropriate sized +// sub batches depending on how the loader is configured +func (l *FileLoader) LoadAll(keys []file.ID) ([]file.File, []error) { + results := make([]func() (file.File, error), len(keys)) + + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + + files := make([]file.File, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + files[i], errors[i] = thunk() + } + return files, errors +} + +// LoadAllThunk returns a function that when called will block waiting for a Files. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *FileLoader) LoadAllThunk(keys []file.ID) func() ([]file.File, []error) { + results := make([]func() (file.File, error), len(keys)) + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + return func() ([]file.File, []error) { + files := make([]file.File, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + files[i], errors[i] = thunk() + } + return files, errors + } +} + +// Prime the cache with the provided key and value. If the key already exists, no change is made +// and false is returned. +// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).) 
+func (l *FileLoader) Prime(key file.ID, value file.File) bool { + l.mu.Lock() + var found bool + if _, found = l.cache[key]; !found { + l.unsafeSet(key, value) + } + l.mu.Unlock() + return !found +} + +// Clear the value at key from the cache, if it exists +func (l *FileLoader) Clear(key file.ID) { + l.mu.Lock() + delete(l.cache, key) + l.mu.Unlock() +} + +func (l *FileLoader) unsafeSet(key file.ID, value file.File) { + if l.cache == nil { + l.cache = map[file.ID]file.File{} + } + l.cache[key] = value +} + +// keyIndex will return the location of the key in the batch, if its not found +// it will add the key to the batch +func (b *fileLoaderBatch) keyIndex(l *FileLoader, key file.ID) int { + for i, existingKey := range b.keys { + if key == existingKey { + return i + } + } + + pos := len(b.keys) + b.keys = append(b.keys, key) + if pos == 0 { + go b.startTimer(l) + } + + if l.maxBatch != 0 && pos >= l.maxBatch-1 { + if !b.closing { + b.closing = true + l.batch = nil + go b.end(l) + } + } + + return pos +} + +func (b *fileLoaderBatch) startTimer(l *FileLoader) { + time.Sleep(l.wait) + l.mu.Lock() + + // we must have hit a batch limit and are already finalizing this batch + if b.closing { + l.mu.Unlock() + return + } + + l.batch = nil + l.mu.Unlock() + + b.end(l) +} + +func (b *fileLoaderBatch) end(l *FileLoader) { + b.data, b.error = l.fetch(b.keys) + close(b.done) +} diff --git a/internal/api/loaders/galleryfileidsloader_gen.go b/internal/api/loaders/galleryfileidsloader_gen.go new file mode 100644 index 000000000..808cfbf0f --- /dev/null +++ b/internal/api/loaders/galleryfileidsloader_gen.go @@ -0,0 +1,225 @@ +// Code generated by github.com/vektah/dataloaden, DO NOT EDIT. + +package loaders + +import ( + "sync" + "time" + + "github.com/stashapp/stash/pkg/file" +) + +// GalleryFileIDsLoaderConfig captures the config to create a new GalleryFileIDsLoader +type GalleryFileIDsLoaderConfig struct { + // Fetch is a method that provides the data for the loader + Fetch func(keys []int) ([][]file.ID, []error) + + // Wait is how long wait before sending a batch + Wait time.Duration + + // MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit + MaxBatch int +} + +// NewGalleryFileIDsLoader creates a new GalleryFileIDsLoader given a fetch, wait, and maxBatch +func NewGalleryFileIDsLoader(config GalleryFileIDsLoaderConfig) *GalleryFileIDsLoader { + return &GalleryFileIDsLoader{ + fetch: config.Fetch, + wait: config.Wait, + maxBatch: config.MaxBatch, + } +} + +// GalleryFileIDsLoader batches and caches requests +type GalleryFileIDsLoader struct { + // this method provides the data for the loader + fetch func(keys []int) ([][]file.ID, []error) + + // how long to done before sending a batch + wait time.Duration + + // this will limit the maximum number of keys to send in one batch, 0 = no limit + maxBatch int + + // INTERNAL + + // lazily created cache + cache map[int][]file.ID + + // the current batch. 
keys will continue to be collected until timeout is hit, + // then everything will be sent to the fetch method and out to the listeners + batch *galleryFileIDsLoaderBatch + + // mutex to prevent races + mu sync.Mutex +} + +type galleryFileIDsLoaderBatch struct { + keys []int + data [][]file.ID + error []error + closing bool + done chan struct{} +} + +// Load a ID by key, batching and caching will be applied automatically +func (l *GalleryFileIDsLoader) Load(key int) ([]file.ID, error) { + return l.LoadThunk(key)() +} + +// LoadThunk returns a function that when called will block waiting for a ID. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *GalleryFileIDsLoader) LoadThunk(key int) func() ([]file.ID, error) { + l.mu.Lock() + if it, ok := l.cache[key]; ok { + l.mu.Unlock() + return func() ([]file.ID, error) { + return it, nil + } + } + if l.batch == nil { + l.batch = &galleryFileIDsLoaderBatch{done: make(chan struct{})} + } + batch := l.batch + pos := batch.keyIndex(l, key) + l.mu.Unlock() + + return func() ([]file.ID, error) { + <-batch.done + + var data []file.ID + if pos < len(batch.data) { + data = batch.data[pos] + } + + var err error + // its convenient to be able to return a single error for everything + if len(batch.error) == 1 { + err = batch.error[0] + } else if batch.error != nil { + err = batch.error[pos] + } + + if err == nil { + l.mu.Lock() + l.unsafeSet(key, data) + l.mu.Unlock() + } + + return data, err + } +} + +// LoadAll fetches many keys at once. It will be broken into appropriate sized +// sub batches depending on how the loader is configured +func (l *GalleryFileIDsLoader) LoadAll(keys []int) ([][]file.ID, []error) { + results := make([]func() ([]file.ID, error), len(keys)) + + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + + iDs := make([][]file.ID, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + iDs[i], errors[i] = thunk() + } + return iDs, errors +} + +// LoadAllThunk returns a function that when called will block waiting for a IDs. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *GalleryFileIDsLoader) LoadAllThunk(keys []int) func() ([][]file.ID, []error) { + results := make([]func() ([]file.ID, error), len(keys)) + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + return func() ([][]file.ID, []error) { + iDs := make([][]file.ID, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + iDs[i], errors[i] = thunk() + } + return iDs, errors + } +} + +// Prime the cache with the provided key and value. If the key already exists, no change is made +// and false is returned. +// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).) +func (l *GalleryFileIDsLoader) Prime(key int, value []file.ID) bool { + l.mu.Lock() + var found bool + if _, found = l.cache[key]; !found { + // make a copy when writing to the cache, its easy to pass a pointer in from a loop var + // and end up with the whole cache pointing to the same value. 
+ cpy := make([]file.ID, len(value)) + copy(cpy, value) + l.unsafeSet(key, cpy) + } + l.mu.Unlock() + return !found +} + +// Clear the value at key from the cache, if it exists +func (l *GalleryFileIDsLoader) Clear(key int) { + l.mu.Lock() + delete(l.cache, key) + l.mu.Unlock() +} + +func (l *GalleryFileIDsLoader) unsafeSet(key int, value []file.ID) { + if l.cache == nil { + l.cache = map[int][]file.ID{} + } + l.cache[key] = value +} + +// keyIndex will return the location of the key in the batch, if its not found +// it will add the key to the batch +func (b *galleryFileIDsLoaderBatch) keyIndex(l *GalleryFileIDsLoader, key int) int { + for i, existingKey := range b.keys { + if key == existingKey { + return i + } + } + + pos := len(b.keys) + b.keys = append(b.keys, key) + if pos == 0 { + go b.startTimer(l) + } + + if l.maxBatch != 0 && pos >= l.maxBatch-1 { + if !b.closing { + b.closing = true + l.batch = nil + go b.end(l) + } + } + + return pos +} + +func (b *galleryFileIDsLoaderBatch) startTimer(l *GalleryFileIDsLoader) { + time.Sleep(l.wait) + l.mu.Lock() + + // we must have hit a batch limit and are already finalizing this batch + if b.closing { + l.mu.Unlock() + return + } + + l.batch = nil + l.mu.Unlock() + + b.end(l) +} + +func (b *galleryFileIDsLoaderBatch) end(l *GalleryFileIDsLoader) { + b.data, b.error = l.fetch(b.keys) + close(b.done) +} diff --git a/internal/api/loaders/galleryloader_gen.go b/internal/api/loaders/galleryloader_gen.go new file mode 100644 index 000000000..0f8887d6a --- /dev/null +++ b/internal/api/loaders/galleryloader_gen.go @@ -0,0 +1,224 @@ +// Code generated by github.com/vektah/dataloaden, DO NOT EDIT. + +package loaders + +import ( + "sync" + "time" + + "github.com/stashapp/stash/pkg/models" +) + +// GalleryLoaderConfig captures the config to create a new GalleryLoader +type GalleryLoaderConfig struct { + // Fetch is a method that provides the data for the loader + Fetch func(keys []int) ([]*models.Gallery, []error) + + // Wait is how long wait before sending a batch + Wait time.Duration + + // MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit + MaxBatch int +} + +// NewGalleryLoader creates a new GalleryLoader given a fetch, wait, and maxBatch +func NewGalleryLoader(config GalleryLoaderConfig) *GalleryLoader { + return &GalleryLoader{ + fetch: config.Fetch, + wait: config.Wait, + maxBatch: config.MaxBatch, + } +} + +// GalleryLoader batches and caches requests +type GalleryLoader struct { + // this method provides the data for the loader + fetch func(keys []int) ([]*models.Gallery, []error) + + // how long to done before sending a batch + wait time.Duration + + // this will limit the maximum number of keys to send in one batch, 0 = no limit + maxBatch int + + // INTERNAL + + // lazily created cache + cache map[int]*models.Gallery + + // the current batch. keys will continue to be collected until timeout is hit, + // then everything will be sent to the fetch method and out to the listeners + batch *galleryLoaderBatch + + // mutex to prevent races + mu sync.Mutex +} + +type galleryLoaderBatch struct { + keys []int + data []*models.Gallery + error []error + closing bool + done chan struct{} +} + +// Load a Gallery by key, batching and caching will be applied automatically +func (l *GalleryLoader) Load(key int) (*models.Gallery, error) { + return l.LoadThunk(key)() +} + +// LoadThunk returns a function that when called will block waiting for a Gallery. 
+// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *GalleryLoader) LoadThunk(key int) func() (*models.Gallery, error) { + l.mu.Lock() + if it, ok := l.cache[key]; ok { + l.mu.Unlock() + return func() (*models.Gallery, error) { + return it, nil + } + } + if l.batch == nil { + l.batch = &galleryLoaderBatch{done: make(chan struct{})} + } + batch := l.batch + pos := batch.keyIndex(l, key) + l.mu.Unlock() + + return func() (*models.Gallery, error) { + <-batch.done + + var data *models.Gallery + if pos < len(batch.data) { + data = batch.data[pos] + } + + var err error + // its convenient to be able to return a single error for everything + if len(batch.error) == 1 { + err = batch.error[0] + } else if batch.error != nil { + err = batch.error[pos] + } + + if err == nil { + l.mu.Lock() + l.unsafeSet(key, data) + l.mu.Unlock() + } + + return data, err + } +} + +// LoadAll fetches many keys at once. It will be broken into appropriate sized +// sub batches depending on how the loader is configured +func (l *GalleryLoader) LoadAll(keys []int) ([]*models.Gallery, []error) { + results := make([]func() (*models.Gallery, error), len(keys)) + + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + + gallerys := make([]*models.Gallery, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + gallerys[i], errors[i] = thunk() + } + return gallerys, errors +} + +// LoadAllThunk returns a function that when called will block waiting for a Gallerys. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *GalleryLoader) LoadAllThunk(keys []int) func() ([]*models.Gallery, []error) { + results := make([]func() (*models.Gallery, error), len(keys)) + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + return func() ([]*models.Gallery, []error) { + gallerys := make([]*models.Gallery, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + gallerys[i], errors[i] = thunk() + } + return gallerys, errors + } +} + +// Prime the cache with the provided key and value. If the key already exists, no change is made +// and false is returned. +// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).) +func (l *GalleryLoader) Prime(key int, value *models.Gallery) bool { + l.mu.Lock() + var found bool + if _, found = l.cache[key]; !found { + // make a copy when writing to the cache, its easy to pass a pointer in from a loop var + // and end up with the whole cache pointing to the same value. 
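+ // (Note that cpy := *value below is a shallow copy: any pointer, slice or
+ // map fields inside models.Gallery still alias the caller's value.)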
+ cpy := *value + l.unsafeSet(key, &cpy) + } + l.mu.Unlock() + return !found +} + +// Clear the value at key from the cache, if it exists +func (l *GalleryLoader) Clear(key int) { + l.mu.Lock() + delete(l.cache, key) + l.mu.Unlock() +} + +func (l *GalleryLoader) unsafeSet(key int, value *models.Gallery) { + if l.cache == nil { + l.cache = map[int]*models.Gallery{} + } + l.cache[key] = value +} + +// keyIndex will return the location of the key in the batch, if its not found +// it will add the key to the batch +func (b *galleryLoaderBatch) keyIndex(l *GalleryLoader, key int) int { + for i, existingKey := range b.keys { + if key == existingKey { + return i + } + } + + pos := len(b.keys) + b.keys = append(b.keys, key) + if pos == 0 { + go b.startTimer(l) + } + + if l.maxBatch != 0 && pos >= l.maxBatch-1 { + if !b.closing { + b.closing = true + l.batch = nil + go b.end(l) + } + } + + return pos +} + +func (b *galleryLoaderBatch) startTimer(l *GalleryLoader) { + time.Sleep(l.wait) + l.mu.Lock() + + // we must have hit a batch limit and are already finalizing this batch + if b.closing { + l.mu.Unlock() + return + } + + l.batch = nil + l.mu.Unlock() + + b.end(l) +} + +func (b *galleryLoaderBatch) end(l *GalleryLoader) { + b.data, b.error = l.fetch(b.keys) + close(b.done) +} diff --git a/internal/api/loaders/imagefileidsloader_gen.go b/internal/api/loaders/imagefileidsloader_gen.go new file mode 100644 index 000000000..7e633d8ef --- /dev/null +++ b/internal/api/loaders/imagefileidsloader_gen.go @@ -0,0 +1,225 @@ +// Code generated by github.com/vektah/dataloaden, DO NOT EDIT. + +package loaders + +import ( + "sync" + "time" + + "github.com/stashapp/stash/pkg/file" +) + +// ImageFileIDsLoaderConfig captures the config to create a new ImageFileIDsLoader +type ImageFileIDsLoaderConfig struct { + // Fetch is a method that provides the data for the loader + Fetch func(keys []int) ([][]file.ID, []error) + + // Wait is how long wait before sending a batch + Wait time.Duration + + // MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit + MaxBatch int +} + +// NewImageFileIDsLoader creates a new ImageFileIDsLoader given a fetch, wait, and maxBatch +func NewImageFileIDsLoader(config ImageFileIDsLoaderConfig) *ImageFileIDsLoader { + return &ImageFileIDsLoader{ + fetch: config.Fetch, + wait: config.Wait, + maxBatch: config.MaxBatch, + } +} + +// ImageFileIDsLoader batches and caches requests +type ImageFileIDsLoader struct { + // this method provides the data for the loader + fetch func(keys []int) ([][]file.ID, []error) + + // how long to done before sending a batch + wait time.Duration + + // this will limit the maximum number of keys to send in one batch, 0 = no limit + maxBatch int + + // INTERNAL + + // lazily created cache + cache map[int][]file.ID + + // the current batch. keys will continue to be collected until timeout is hit, + // then everything will be sent to the fetch method and out to the listeners + batch *imageFileIDsLoaderBatch + + // mutex to prevent races + mu sync.Mutex +} + +type imageFileIDsLoaderBatch struct { + keys []int + data [][]file.ID + error []error + closing bool + done chan struct{} +} + +// Load a ID by key, batching and caching will be applied automatically +func (l *ImageFileIDsLoader) Load(key int) ([]file.ID, error) { + return l.LoadThunk(key)() +} + +// LoadThunk returns a function that when called will block waiting for a ID. 
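+// (If the key is already cached, the thunk returned here resolves
+// immediately from the cache and never joins a batch.)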
+// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *ImageFileIDsLoader) LoadThunk(key int) func() ([]file.ID, error) { + l.mu.Lock() + if it, ok := l.cache[key]; ok { + l.mu.Unlock() + return func() ([]file.ID, error) { + return it, nil + } + } + if l.batch == nil { + l.batch = &imageFileIDsLoaderBatch{done: make(chan struct{})} + } + batch := l.batch + pos := batch.keyIndex(l, key) + l.mu.Unlock() + + return func() ([]file.ID, error) { + <-batch.done + + var data []file.ID + if pos < len(batch.data) { + data = batch.data[pos] + } + + var err error + // its convenient to be able to return a single error for everything + if len(batch.error) == 1 { + err = batch.error[0] + } else if batch.error != nil { + err = batch.error[pos] + } + + if err == nil { + l.mu.Lock() + l.unsafeSet(key, data) + l.mu.Unlock() + } + + return data, err + } +} + +// LoadAll fetches many keys at once. It will be broken into appropriate sized +// sub batches depending on how the loader is configured +func (l *ImageFileIDsLoader) LoadAll(keys []int) ([][]file.ID, []error) { + results := make([]func() ([]file.ID, error), len(keys)) + + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + + iDs := make([][]file.ID, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + iDs[i], errors[i] = thunk() + } + return iDs, errors +} + +// LoadAllThunk returns a function that when called will block waiting for a IDs. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *ImageFileIDsLoader) LoadAllThunk(keys []int) func() ([][]file.ID, []error) { + results := make([]func() ([]file.ID, error), len(keys)) + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + return func() ([][]file.ID, []error) { + iDs := make([][]file.ID, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + iDs[i], errors[i] = thunk() + } + return iDs, errors + } +} + +// Prime the cache with the provided key and value. If the key already exists, no change is made +// and false is returned. +// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).) +func (l *ImageFileIDsLoader) Prime(key int, value []file.ID) bool { + l.mu.Lock() + var found bool + if _, found = l.cache[key]; !found { + // make a copy when writing to the cache, its easy to pass a pointer in from a loop var + // and end up with the whole cache pointing to the same value. 
+ cpy := make([]file.ID, len(value)) + copy(cpy, value) + l.unsafeSet(key, cpy) + } + l.mu.Unlock() + return !found +} + +// Clear the value at key from the cache, if it exists +func (l *ImageFileIDsLoader) Clear(key int) { + l.mu.Lock() + delete(l.cache, key) + l.mu.Unlock() +} + +func (l *ImageFileIDsLoader) unsafeSet(key int, value []file.ID) { + if l.cache == nil { + l.cache = map[int][]file.ID{} + } + l.cache[key] = value +} + +// keyIndex will return the location of the key in the batch, if its not found +// it will add the key to the batch +func (b *imageFileIDsLoaderBatch) keyIndex(l *ImageFileIDsLoader, key int) int { + for i, existingKey := range b.keys { + if key == existingKey { + return i + } + } + + pos := len(b.keys) + b.keys = append(b.keys, key) + if pos == 0 { + go b.startTimer(l) + } + + if l.maxBatch != 0 && pos >= l.maxBatch-1 { + if !b.closing { + b.closing = true + l.batch = nil + go b.end(l) + } + } + + return pos +} + +func (b *imageFileIDsLoaderBatch) startTimer(l *ImageFileIDsLoader) { + time.Sleep(l.wait) + l.mu.Lock() + + // we must have hit a batch limit and are already finalizing this batch + if b.closing { + l.mu.Unlock() + return + } + + l.batch = nil + l.mu.Unlock() + + b.end(l) +} + +func (b *imageFileIDsLoaderBatch) end(l *ImageFileIDsLoader) { + b.data, b.error = l.fetch(b.keys) + close(b.done) +} diff --git a/internal/api/loaders/imageloader_gen.go b/internal/api/loaders/imageloader_gen.go new file mode 100644 index 000000000..b726f3aa5 --- /dev/null +++ b/internal/api/loaders/imageloader_gen.go @@ -0,0 +1,224 @@ +// Code generated by github.com/vektah/dataloaden, DO NOT EDIT. + +package loaders + +import ( + "sync" + "time" + + "github.com/stashapp/stash/pkg/models" +) + +// ImageLoaderConfig captures the config to create a new ImageLoader +type ImageLoaderConfig struct { + // Fetch is a method that provides the data for the loader + Fetch func(keys []int) ([]*models.Image, []error) + + // Wait is how long wait before sending a batch + Wait time.Duration + + // MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit + MaxBatch int +} + +// NewImageLoader creates a new ImageLoader given a fetch, wait, and maxBatch +func NewImageLoader(config ImageLoaderConfig) *ImageLoader { + return &ImageLoader{ + fetch: config.Fetch, + wait: config.Wait, + maxBatch: config.MaxBatch, + } +} + +// ImageLoader batches and caches requests +type ImageLoader struct { + // this method provides the data for the loader + fetch func(keys []int) ([]*models.Image, []error) + + // how long to done before sending a batch + wait time.Duration + + // this will limit the maximum number of keys to send in one batch, 0 = no limit + maxBatch int + + // INTERNAL + + // lazily created cache + cache map[int]*models.Image + + // the current batch. keys will continue to be collected until timeout is hit, + // then everything will be sent to the fetch method and out to the listeners + batch *imageLoaderBatch + + // mutex to prevent races + mu sync.Mutex +} + +type imageLoaderBatch struct { + keys []int + data []*models.Image + error []error + closing bool + done chan struct{} +} + +// Load a Image by key, batching and caching will be applied automatically +func (l *ImageLoader) Load(key int) (*models.Image, error) { + return l.LoadThunk(key)() +} + +// LoadThunk returns a function that when called will block waiting for a Image. 
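+// (Error handling: if Fetch returned exactly one error it is reported for
+// every key in the batch; otherwise errors are matched to keys by position.)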
+// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *ImageLoader) LoadThunk(key int) func() (*models.Image, error) { + l.mu.Lock() + if it, ok := l.cache[key]; ok { + l.mu.Unlock() + return func() (*models.Image, error) { + return it, nil + } + } + if l.batch == nil { + l.batch = &imageLoaderBatch{done: make(chan struct{})} + } + batch := l.batch + pos := batch.keyIndex(l, key) + l.mu.Unlock() + + return func() (*models.Image, error) { + <-batch.done + + var data *models.Image + if pos < len(batch.data) { + data = batch.data[pos] + } + + var err error + // its convenient to be able to return a single error for everything + if len(batch.error) == 1 { + err = batch.error[0] + } else if batch.error != nil { + err = batch.error[pos] + } + + if err == nil { + l.mu.Lock() + l.unsafeSet(key, data) + l.mu.Unlock() + } + + return data, err + } +} + +// LoadAll fetches many keys at once. It will be broken into appropriate sized +// sub batches depending on how the loader is configured +func (l *ImageLoader) LoadAll(keys []int) ([]*models.Image, []error) { + results := make([]func() (*models.Image, error), len(keys)) + + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + + images := make([]*models.Image, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + images[i], errors[i] = thunk() + } + return images, errors +} + +// LoadAllThunk returns a function that when called will block waiting for a Images. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *ImageLoader) LoadAllThunk(keys []int) func() ([]*models.Image, []error) { + results := make([]func() (*models.Image, error), len(keys)) + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + return func() ([]*models.Image, []error) { + images := make([]*models.Image, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + images[i], errors[i] = thunk() + } + return images, errors + } +} + +// Prime the cache with the provided key and value. If the key already exists, no change is made +// and false is returned. +// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).) +func (l *ImageLoader) Prime(key int, value *models.Image) bool { + l.mu.Lock() + var found bool + if _, found = l.cache[key]; !found { + // make a copy when writing to the cache, its easy to pass a pointer in from a loop var + // and end up with the whole cache pointing to the same value. 
+ cpy := *value + l.unsafeSet(key, &cpy) + } + l.mu.Unlock() + return !found +} + +// Clear the value at key from the cache, if it exists +func (l *ImageLoader) Clear(key int) { + l.mu.Lock() + delete(l.cache, key) + l.mu.Unlock() +} + +func (l *ImageLoader) unsafeSet(key int, value *models.Image) { + if l.cache == nil { + l.cache = map[int]*models.Image{} + } + l.cache[key] = value +} + +// keyIndex will return the location of the key in the batch, if its not found +// it will add the key to the batch +func (b *imageLoaderBatch) keyIndex(l *ImageLoader, key int) int { + for i, existingKey := range b.keys { + if key == existingKey { + return i + } + } + + pos := len(b.keys) + b.keys = append(b.keys, key) + if pos == 0 { + go b.startTimer(l) + } + + if l.maxBatch != 0 && pos >= l.maxBatch-1 { + if !b.closing { + b.closing = true + l.batch = nil + go b.end(l) + } + } + + return pos +} + +func (b *imageLoaderBatch) startTimer(l *ImageLoader) { + time.Sleep(l.wait) + l.mu.Lock() + + // we must have hit a batch limit and are already finalizing this batch + if b.closing { + l.mu.Unlock() + return + } + + l.batch = nil + l.mu.Unlock() + + b.end(l) +} + +func (b *imageLoaderBatch) end(l *ImageLoader) { + b.data, b.error = l.fetch(b.keys) + close(b.done) +} diff --git a/internal/api/loaders/movieloader_gen.go b/internal/api/loaders/movieloader_gen.go new file mode 100644 index 000000000..3783d3a41 --- /dev/null +++ b/internal/api/loaders/movieloader_gen.go @@ -0,0 +1,224 @@ +// Code generated by github.com/vektah/dataloaden, DO NOT EDIT. + +package loaders + +import ( + "sync" + "time" + + "github.com/stashapp/stash/pkg/models" +) + +// MovieLoaderConfig captures the config to create a new MovieLoader +type MovieLoaderConfig struct { + // Fetch is a method that provides the data for the loader + Fetch func(keys []int) ([]*models.Movie, []error) + + // Wait is how long wait before sending a batch + Wait time.Duration + + // MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit + MaxBatch int +} + +// NewMovieLoader creates a new MovieLoader given a fetch, wait, and maxBatch +func NewMovieLoader(config MovieLoaderConfig) *MovieLoader { + return &MovieLoader{ + fetch: config.Fetch, + wait: config.Wait, + maxBatch: config.MaxBatch, + } +} + +// MovieLoader batches and caches requests +type MovieLoader struct { + // this method provides the data for the loader + fetch func(keys []int) ([]*models.Movie, []error) + + // how long to done before sending a batch + wait time.Duration + + // this will limit the maximum number of keys to send in one batch, 0 = no limit + maxBatch int + + // INTERNAL + + // lazily created cache + cache map[int]*models.Movie + + // the current batch. keys will continue to be collected until timeout is hit, + // then everything will be sent to the fetch method and out to the listeners + batch *movieLoaderBatch + + // mutex to prevent races + mu sync.Mutex +} + +type movieLoaderBatch struct { + keys []int + data []*models.Movie + error []error + closing bool + done chan struct{} +} + +// Load a Movie by key, batching and caching will be applied automatically +func (l *MovieLoader) Load(key int) (*models.Movie, error) { + return l.LoadThunk(key)() +} + +// LoadThunk returns a function that when called will block waiting for a Movie. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. 
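+// A construction sketch (illustrative only: the Wait and MaxBatch values are
+// made up, and moviesByIDs is a hypothetical helper that resolves all ids in
+// one query; the real wiring lives wherever the loaders are created):
+//
+//     loader := NewMovieLoader(MovieLoaderConfig{
+//         Fetch:    func(ids []int) ([]*models.Movie, []error) { return moviesByIDs(ids) },
+//         Wait:     2 * time.Millisecond,
+//         MaxBatch: 100,
+//     })
+//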
+func (l *MovieLoader) LoadThunk(key int) func() (*models.Movie, error) { + l.mu.Lock() + if it, ok := l.cache[key]; ok { + l.mu.Unlock() + return func() (*models.Movie, error) { + return it, nil + } + } + if l.batch == nil { + l.batch = &movieLoaderBatch{done: make(chan struct{})} + } + batch := l.batch + pos := batch.keyIndex(l, key) + l.mu.Unlock() + + return func() (*models.Movie, error) { + <-batch.done + + var data *models.Movie + if pos < len(batch.data) { + data = batch.data[pos] + } + + var err error + // its convenient to be able to return a single error for everything + if len(batch.error) == 1 { + err = batch.error[0] + } else if batch.error != nil { + err = batch.error[pos] + } + + if err == nil { + l.mu.Lock() + l.unsafeSet(key, data) + l.mu.Unlock() + } + + return data, err + } +} + +// LoadAll fetches many keys at once. It will be broken into appropriate sized +// sub batches depending on how the loader is configured +func (l *MovieLoader) LoadAll(keys []int) ([]*models.Movie, []error) { + results := make([]func() (*models.Movie, error), len(keys)) + + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + + movies := make([]*models.Movie, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + movies[i], errors[i] = thunk() + } + return movies, errors +} + +// LoadAllThunk returns a function that when called will block waiting for a Movies. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *MovieLoader) LoadAllThunk(keys []int) func() ([]*models.Movie, []error) { + results := make([]func() (*models.Movie, error), len(keys)) + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + return func() ([]*models.Movie, []error) { + movies := make([]*models.Movie, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + movies[i], errors[i] = thunk() + } + return movies, errors + } +} + +// Prime the cache with the provided key and value. If the key already exists, no change is made +// and false is returned. +// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).) +func (l *MovieLoader) Prime(key int, value *models.Movie) bool { + l.mu.Lock() + var found bool + if _, found = l.cache[key]; !found { + // make a copy when writing to the cache, its easy to pass a pointer in from a loop var + // and end up with the whole cache pointing to the same value. 
+ cpy := *value + l.unsafeSet(key, &cpy) + } + l.mu.Unlock() + return !found +} + +// Clear the value at key from the cache, if it exists +func (l *MovieLoader) Clear(key int) { + l.mu.Lock() + delete(l.cache, key) + l.mu.Unlock() +} + +func (l *MovieLoader) unsafeSet(key int, value *models.Movie) { + if l.cache == nil { + l.cache = map[int]*models.Movie{} + } + l.cache[key] = value +} + +// keyIndex will return the location of the key in the batch, if its not found +// it will add the key to the batch +func (b *movieLoaderBatch) keyIndex(l *MovieLoader, key int) int { + for i, existingKey := range b.keys { + if key == existingKey { + return i + } + } + + pos := len(b.keys) + b.keys = append(b.keys, key) + if pos == 0 { + go b.startTimer(l) + } + + if l.maxBatch != 0 && pos >= l.maxBatch-1 { + if !b.closing { + b.closing = true + l.batch = nil + go b.end(l) + } + } + + return pos +} + +func (b *movieLoaderBatch) startTimer(l *MovieLoader) { + time.Sleep(l.wait) + l.mu.Lock() + + // we must have hit a batch limit and are already finalizing this batch + if b.closing { + l.mu.Unlock() + return + } + + l.batch = nil + l.mu.Unlock() + + b.end(l) +} + +func (b *movieLoaderBatch) end(l *MovieLoader) { + b.data, b.error = l.fetch(b.keys) + close(b.done) +} diff --git a/internal/api/loaders/performerloader_gen.go b/internal/api/loaders/performerloader_gen.go new file mode 100644 index 000000000..d380b60b1 --- /dev/null +++ b/internal/api/loaders/performerloader_gen.go @@ -0,0 +1,224 @@ +// Code generated by github.com/vektah/dataloaden, DO NOT EDIT. + +package loaders + +import ( + "sync" + "time" + + "github.com/stashapp/stash/pkg/models" +) + +// PerformerLoaderConfig captures the config to create a new PerformerLoader +type PerformerLoaderConfig struct { + // Fetch is a method that provides the data for the loader + Fetch func(keys []int) ([]*models.Performer, []error) + + // Wait is how long wait before sending a batch + Wait time.Duration + + // MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit + MaxBatch int +} + +// NewPerformerLoader creates a new PerformerLoader given a fetch, wait, and maxBatch +func NewPerformerLoader(config PerformerLoaderConfig) *PerformerLoader { + return &PerformerLoader{ + fetch: config.Fetch, + wait: config.Wait, + maxBatch: config.MaxBatch, + } +} + +// PerformerLoader batches and caches requests +type PerformerLoader struct { + // this method provides the data for the loader + fetch func(keys []int) ([]*models.Performer, []error) + + // how long to done before sending a batch + wait time.Duration + + // this will limit the maximum number of keys to send in one batch, 0 = no limit + maxBatch int + + // INTERNAL + + // lazily created cache + cache map[int]*models.Performer + + // the current batch. keys will continue to be collected until timeout is hit, + // then everything will be sent to the fetch method and out to the listeners + batch *performerLoaderBatch + + // mutex to prevent races + mu sync.Mutex +} + +type performerLoaderBatch struct { + keys []int + data []*models.Performer + error []error + closing bool + done chan struct{} +} + +// Load a Performer by key, batching and caching will be applied automatically +func (l *PerformerLoader) Load(key int) (*models.Performer, error) { + return l.LoadThunk(key)() +} + +// LoadThunk returns a function that when called will block waiting for a Performer. 
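+// (Only successful results are written back to the cache; unsafeSet runs
+// only when err == nil, so a failed key will be fetched again on the next
+// Load.)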
+// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *PerformerLoader) LoadThunk(key int) func() (*models.Performer, error) { + l.mu.Lock() + if it, ok := l.cache[key]; ok { + l.mu.Unlock() + return func() (*models.Performer, error) { + return it, nil + } + } + if l.batch == nil { + l.batch = &performerLoaderBatch{done: make(chan struct{})} + } + batch := l.batch + pos := batch.keyIndex(l, key) + l.mu.Unlock() + + return func() (*models.Performer, error) { + <-batch.done + + var data *models.Performer + if pos < len(batch.data) { + data = batch.data[pos] + } + + var err error + // its convenient to be able to return a single error for everything + if len(batch.error) == 1 { + err = batch.error[0] + } else if batch.error != nil { + err = batch.error[pos] + } + + if err == nil { + l.mu.Lock() + l.unsafeSet(key, data) + l.mu.Unlock() + } + + return data, err + } +} + +// LoadAll fetches many keys at once. It will be broken into appropriate sized +// sub batches depending on how the loader is configured +func (l *PerformerLoader) LoadAll(keys []int) ([]*models.Performer, []error) { + results := make([]func() (*models.Performer, error), len(keys)) + + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + + performers := make([]*models.Performer, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + performers[i], errors[i] = thunk() + } + return performers, errors +} + +// LoadAllThunk returns a function that when called will block waiting for a Performers. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *PerformerLoader) LoadAllThunk(keys []int) func() ([]*models.Performer, []error) { + results := make([]func() (*models.Performer, error), len(keys)) + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + return func() ([]*models.Performer, []error) { + performers := make([]*models.Performer, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + performers[i], errors[i] = thunk() + } + return performers, errors + } +} + +// Prime the cache with the provided key and value. If the key already exists, no change is made +// and false is returned. +// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).) +func (l *PerformerLoader) Prime(key int, value *models.Performer) bool { + l.mu.Lock() + var found bool + if _, found = l.cache[key]; !found { + // make a copy when writing to the cache, its easy to pass a pointer in from a loop var + // and end up with the whole cache pointing to the same value. 
+ cpy := *value + l.unsafeSet(key, &cpy) + } + l.mu.Unlock() + return !found +} + +// Clear the value at key from the cache, if it exists +func (l *PerformerLoader) Clear(key int) { + l.mu.Lock() + delete(l.cache, key) + l.mu.Unlock() +} + +func (l *PerformerLoader) unsafeSet(key int, value *models.Performer) { + if l.cache == nil { + l.cache = map[int]*models.Performer{} + } + l.cache[key] = value +} + +// keyIndex will return the location of the key in the batch, if its not found +// it will add the key to the batch +func (b *performerLoaderBatch) keyIndex(l *PerformerLoader, key int) int { + for i, existingKey := range b.keys { + if key == existingKey { + return i + } + } + + pos := len(b.keys) + b.keys = append(b.keys, key) + if pos == 0 { + go b.startTimer(l) + } + + if l.maxBatch != 0 && pos >= l.maxBatch-1 { + if !b.closing { + b.closing = true + l.batch = nil + go b.end(l) + } + } + + return pos +} + +func (b *performerLoaderBatch) startTimer(l *PerformerLoader) { + time.Sleep(l.wait) + l.mu.Lock() + + // we must have hit a batch limit and are already finalizing this batch + if b.closing { + l.mu.Unlock() + return + } + + l.batch = nil + l.mu.Unlock() + + b.end(l) +} + +func (b *performerLoaderBatch) end(l *PerformerLoader) { + b.data, b.error = l.fetch(b.keys) + close(b.done) +} diff --git a/internal/api/loaders/scenefileidsloader_gen.go b/internal/api/loaders/scenefileidsloader_gen.go new file mode 100644 index 000000000..663be2c6f --- /dev/null +++ b/internal/api/loaders/scenefileidsloader_gen.go @@ -0,0 +1,225 @@ +// Code generated by github.com/vektah/dataloaden, DO NOT EDIT. + +package loaders + +import ( + "sync" + "time" + + "github.com/stashapp/stash/pkg/file" +) + +// SceneFileIDsLoaderConfig captures the config to create a new SceneFileIDsLoader +type SceneFileIDsLoaderConfig struct { + // Fetch is a method that provides the data for the loader + Fetch func(keys []int) ([][]file.ID, []error) + + // Wait is how long wait before sending a batch + Wait time.Duration + + // MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit + MaxBatch int +} + +// NewSceneFileIDsLoader creates a new SceneFileIDsLoader given a fetch, wait, and maxBatch +func NewSceneFileIDsLoader(config SceneFileIDsLoaderConfig) *SceneFileIDsLoader { + return &SceneFileIDsLoader{ + fetch: config.Fetch, + wait: config.Wait, + maxBatch: config.MaxBatch, + } +} + +// SceneFileIDsLoader batches and caches requests +type SceneFileIDsLoader struct { + // this method provides the data for the loader + fetch func(keys []int) ([][]file.ID, []error) + + // how long to done before sending a batch + wait time.Duration + + // this will limit the maximum number of keys to send in one batch, 0 = no limit + maxBatch int + + // INTERNAL + + // lazily created cache + cache map[int][]file.ID + + // the current batch. keys will continue to be collected until timeout is hit, + // then everything will be sent to the fetch method and out to the listeners + batch *sceneFileIDsLoaderBatch + + // mutex to prevent races + mu sync.Mutex +} + +type sceneFileIDsLoaderBatch struct { + keys []int + data [][]file.ID + error []error + closing bool + done chan struct{} +} + +// Load a ID by key, batching and caching will be applied automatically +func (l *SceneFileIDsLoader) Load(key int) ([]file.ID, error) { + return l.LoadThunk(key)() +} + +// LoadThunk returns a function that when called will block waiting for a ID. 
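+// (Duplicate keys within one batch window share a slot: keyIndex returns the
+// existing position instead of appending the key again.)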
+// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *SceneFileIDsLoader) LoadThunk(key int) func() ([]file.ID, error) { + l.mu.Lock() + if it, ok := l.cache[key]; ok { + l.mu.Unlock() + return func() ([]file.ID, error) { + return it, nil + } + } + if l.batch == nil { + l.batch = &sceneFileIDsLoaderBatch{done: make(chan struct{})} + } + batch := l.batch + pos := batch.keyIndex(l, key) + l.mu.Unlock() + + return func() ([]file.ID, error) { + <-batch.done + + var data []file.ID + if pos < len(batch.data) { + data = batch.data[pos] + } + + var err error + // its convenient to be able to return a single error for everything + if len(batch.error) == 1 { + err = batch.error[0] + } else if batch.error != nil { + err = batch.error[pos] + } + + if err == nil { + l.mu.Lock() + l.unsafeSet(key, data) + l.mu.Unlock() + } + + return data, err + } +} + +// LoadAll fetches many keys at once. It will be broken into appropriate sized +// sub batches depending on how the loader is configured +func (l *SceneFileIDsLoader) LoadAll(keys []int) ([][]file.ID, []error) { + results := make([]func() ([]file.ID, error), len(keys)) + + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + + iDs := make([][]file.ID, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + iDs[i], errors[i] = thunk() + } + return iDs, errors +} + +// LoadAllThunk returns a function that when called will block waiting for a IDs. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *SceneFileIDsLoader) LoadAllThunk(keys []int) func() ([][]file.ID, []error) { + results := make([]func() ([]file.ID, error), len(keys)) + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + return func() ([][]file.ID, []error) { + iDs := make([][]file.ID, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + iDs[i], errors[i] = thunk() + } + return iDs, errors + } +} + +// Prime the cache with the provided key and value. If the key already exists, no change is made +// and false is returned. +// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).) +func (l *SceneFileIDsLoader) Prime(key int, value []file.ID) bool { + l.mu.Lock() + var found bool + if _, found = l.cache[key]; !found { + // make a copy when writing to the cache, its easy to pass a pointer in from a loop var + // and end up with the whole cache pointing to the same value. 
+ cpy := make([]file.ID, len(value)) + copy(cpy, value) + l.unsafeSet(key, cpy) + } + l.mu.Unlock() + return !found +} + +// Clear the value at key from the cache, if it exists +func (l *SceneFileIDsLoader) Clear(key int) { + l.mu.Lock() + delete(l.cache, key) + l.mu.Unlock() +} + +func (l *SceneFileIDsLoader) unsafeSet(key int, value []file.ID) { + if l.cache == nil { + l.cache = map[int][]file.ID{} + } + l.cache[key] = value +} + +// keyIndex will return the location of the key in the batch, if its not found +// it will add the key to the batch +func (b *sceneFileIDsLoaderBatch) keyIndex(l *SceneFileIDsLoader, key int) int { + for i, existingKey := range b.keys { + if key == existingKey { + return i + } + } + + pos := len(b.keys) + b.keys = append(b.keys, key) + if pos == 0 { + go b.startTimer(l) + } + + if l.maxBatch != 0 && pos >= l.maxBatch-1 { + if !b.closing { + b.closing = true + l.batch = nil + go b.end(l) + } + } + + return pos +} + +func (b *sceneFileIDsLoaderBatch) startTimer(l *SceneFileIDsLoader) { + time.Sleep(l.wait) + l.mu.Lock() + + // we must have hit a batch limit and are already finalizing this batch + if b.closing { + l.mu.Unlock() + return + } + + l.batch = nil + l.mu.Unlock() + + b.end(l) +} + +func (b *sceneFileIDsLoaderBatch) end(l *SceneFileIDsLoader) { + b.data, b.error = l.fetch(b.keys) + close(b.done) +} diff --git a/internal/api/loaders/sceneloader_gen.go b/internal/api/loaders/sceneloader_gen.go new file mode 100644 index 000000000..1636383db --- /dev/null +++ b/internal/api/loaders/sceneloader_gen.go @@ -0,0 +1,224 @@ +// Code generated by github.com/vektah/dataloaden, DO NOT EDIT. + +package loaders + +import ( + "sync" + "time" + + "github.com/stashapp/stash/pkg/models" +) + +// SceneLoaderConfig captures the config to create a new SceneLoader +type SceneLoaderConfig struct { + // Fetch is a method that provides the data for the loader + Fetch func(keys []int) ([]*models.Scene, []error) + + // Wait is how long wait before sending a batch + Wait time.Duration + + // MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit + MaxBatch int +} + +// NewSceneLoader creates a new SceneLoader given a fetch, wait, and maxBatch +func NewSceneLoader(config SceneLoaderConfig) *SceneLoader { + return &SceneLoader{ + fetch: config.Fetch, + wait: config.Wait, + maxBatch: config.MaxBatch, + } +} + +// SceneLoader batches and caches requests +type SceneLoader struct { + // this method provides the data for the loader + fetch func(keys []int) ([]*models.Scene, []error) + + // how long to done before sending a batch + wait time.Duration + + // this will limit the maximum number of keys to send in one batch, 0 = no limit + maxBatch int + + // INTERNAL + + // lazily created cache + cache map[int]*models.Scene + + // the current batch. keys will continue to be collected until timeout is hit, + // then everything will be sent to the fetch method and out to the listeners + batch *sceneLoaderBatch + + // mutex to prevent races + mu sync.Mutex +} + +type sceneLoaderBatch struct { + keys []int + data []*models.Scene + error []error + closing bool + done chan struct{} +} + +// Load a Scene by key, batching and caching will be applied automatically +func (l *SceneLoader) Load(key int) (*models.Scene, error) { + return l.LoadThunk(key)() +} + +// LoadThunk returns a function that when called will block waiting for a Scene. 
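+// (A batch is dispatched when the Wait timer started by its first key fires,
+// or as soon as MaxBatch keys have accumulated, whichever happens first.)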
+// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *SceneLoader) LoadThunk(key int) func() (*models.Scene, error) { + l.mu.Lock() + if it, ok := l.cache[key]; ok { + l.mu.Unlock() + return func() (*models.Scene, error) { + return it, nil + } + } + if l.batch == nil { + l.batch = &sceneLoaderBatch{done: make(chan struct{})} + } + batch := l.batch + pos := batch.keyIndex(l, key) + l.mu.Unlock() + + return func() (*models.Scene, error) { + <-batch.done + + var data *models.Scene + if pos < len(batch.data) { + data = batch.data[pos] + } + + var err error + // its convenient to be able to return a single error for everything + if len(batch.error) == 1 { + err = batch.error[0] + } else if batch.error != nil { + err = batch.error[pos] + } + + if err == nil { + l.mu.Lock() + l.unsafeSet(key, data) + l.mu.Unlock() + } + + return data, err + } +} + +// LoadAll fetches many keys at once. It will be broken into appropriate sized +// sub batches depending on how the loader is configured +func (l *SceneLoader) LoadAll(keys []int) ([]*models.Scene, []error) { + results := make([]func() (*models.Scene, error), len(keys)) + + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + + scenes := make([]*models.Scene, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + scenes[i], errors[i] = thunk() + } + return scenes, errors +} + +// LoadAllThunk returns a function that when called will block waiting for a Scenes. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *SceneLoader) LoadAllThunk(keys []int) func() ([]*models.Scene, []error) { + results := make([]func() (*models.Scene, error), len(keys)) + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + return func() ([]*models.Scene, []error) { + scenes := make([]*models.Scene, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + scenes[i], errors[i] = thunk() + } + return scenes, errors + } +} + +// Prime the cache with the provided key and value. If the key already exists, no change is made +// and false is returned. +// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).) +func (l *SceneLoader) Prime(key int, value *models.Scene) bool { + l.mu.Lock() + var found bool + if _, found = l.cache[key]; !found { + // make a copy when writing to the cache, its easy to pass a pointer in from a loop var + // and end up with the whole cache pointing to the same value. 
+ cpy := *value + l.unsafeSet(key, &cpy) + } + l.mu.Unlock() + return !found +} + +// Clear the value at key from the cache, if it exists +func (l *SceneLoader) Clear(key int) { + l.mu.Lock() + delete(l.cache, key) + l.mu.Unlock() +} + +func (l *SceneLoader) unsafeSet(key int, value *models.Scene) { + if l.cache == nil { + l.cache = map[int]*models.Scene{} + } + l.cache[key] = value +} + +// keyIndex will return the location of the key in the batch, if its not found +// it will add the key to the batch +func (b *sceneLoaderBatch) keyIndex(l *SceneLoader, key int) int { + for i, existingKey := range b.keys { + if key == existingKey { + return i + } + } + + pos := len(b.keys) + b.keys = append(b.keys, key) + if pos == 0 { + go b.startTimer(l) + } + + if l.maxBatch != 0 && pos >= l.maxBatch-1 { + if !b.closing { + b.closing = true + l.batch = nil + go b.end(l) + } + } + + return pos +} + +func (b *sceneLoaderBatch) startTimer(l *SceneLoader) { + time.Sleep(l.wait) + l.mu.Lock() + + // we must have hit a batch limit and are already finalizing this batch + if b.closing { + l.mu.Unlock() + return + } + + l.batch = nil + l.mu.Unlock() + + b.end(l) +} + +func (b *sceneLoaderBatch) end(l *SceneLoader) { + b.data, b.error = l.fetch(b.keys) + close(b.done) +} diff --git a/internal/api/loaders/studioloader_gen.go b/internal/api/loaders/studioloader_gen.go new file mode 100644 index 000000000..877437dad --- /dev/null +++ b/internal/api/loaders/studioloader_gen.go @@ -0,0 +1,224 @@ +// Code generated by github.com/vektah/dataloaden, DO NOT EDIT. + +package loaders + +import ( + "sync" + "time" + + "github.com/stashapp/stash/pkg/models" +) + +// StudioLoaderConfig captures the config to create a new StudioLoader +type StudioLoaderConfig struct { + // Fetch is a method that provides the data for the loader + Fetch func(keys []int) ([]*models.Studio, []error) + + // Wait is how long wait before sending a batch + Wait time.Duration + + // MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit + MaxBatch int +} + +// NewStudioLoader creates a new StudioLoader given a fetch, wait, and maxBatch +func NewStudioLoader(config StudioLoaderConfig) *StudioLoader { + return &StudioLoader{ + fetch: config.Fetch, + wait: config.Wait, + maxBatch: config.MaxBatch, + } +} + +// StudioLoader batches and caches requests +type StudioLoader struct { + // this method provides the data for the loader + fetch func(keys []int) ([]*models.Studio, []error) + + // how long to done before sending a batch + wait time.Duration + + // this will limit the maximum number of keys to send in one batch, 0 = no limit + maxBatch int + + // INTERNAL + + // lazily created cache + cache map[int]*models.Studio + + // the current batch. keys will continue to be collected until timeout is hit, + // then everything will be sent to the fetch method and out to the listeners + batch *studioLoaderBatch + + // mutex to prevent races + mu sync.Mutex +} + +type studioLoaderBatch struct { + keys []int + data []*models.Studio + error []error + closing bool + done chan struct{} +} + +// Load a Studio by key, batching and caching will be applied automatically +func (l *StudioLoader) Load(key int) (*models.Studio, error) { + return l.LoadThunk(key)() +} + +// LoadThunk returns a function that when called will block waiting for a Studio. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. 
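+// A usage sketch (the IDs are hypothetical): requesting thunks first and
+// invoking them afterwards lets both keys travel in a single Fetch batch:
+//
+//     thunkA := loader.LoadThunk(1)
+//     thunkB := loader.LoadThunk(2)
+//     studioA, errA := thunkA() // both keys were fetched together
+//     studioB, errB := thunkB()
+//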
+func (l *StudioLoader) LoadThunk(key int) func() (*models.Studio, error) { + l.mu.Lock() + if it, ok := l.cache[key]; ok { + l.mu.Unlock() + return func() (*models.Studio, error) { + return it, nil + } + } + if l.batch == nil { + l.batch = &studioLoaderBatch{done: make(chan struct{})} + } + batch := l.batch + pos := batch.keyIndex(l, key) + l.mu.Unlock() + + return func() (*models.Studio, error) { + <-batch.done + + var data *models.Studio + if pos < len(batch.data) { + data = batch.data[pos] + } + + var err error + // its convenient to be able to return a single error for everything + if len(batch.error) == 1 { + err = batch.error[0] + } else if batch.error != nil { + err = batch.error[pos] + } + + if err == nil { + l.mu.Lock() + l.unsafeSet(key, data) + l.mu.Unlock() + } + + return data, err + } +} + +// LoadAll fetches many keys at once. It will be broken into appropriate sized +// sub batches depending on how the loader is configured +func (l *StudioLoader) LoadAll(keys []int) ([]*models.Studio, []error) { + results := make([]func() (*models.Studio, error), len(keys)) + + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + + studios := make([]*models.Studio, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + studios[i], errors[i] = thunk() + } + return studios, errors +} + +// LoadAllThunk returns a function that when called will block waiting for a Studios. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *StudioLoader) LoadAllThunk(keys []int) func() ([]*models.Studio, []error) { + results := make([]func() (*models.Studio, error), len(keys)) + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + return func() ([]*models.Studio, []error) { + studios := make([]*models.Studio, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + studios[i], errors[i] = thunk() + } + return studios, errors + } +} + +// Prime the cache with the provided key and value. If the key already exists, no change is made +// and false is returned. +// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).) +func (l *StudioLoader) Prime(key int, value *models.Studio) bool { + l.mu.Lock() + var found bool + if _, found = l.cache[key]; !found { + // make a copy when writing to the cache, its easy to pass a pointer in from a loop var + // and end up with the whole cache pointing to the same value. 
+ cpy := *value + l.unsafeSet(key, &cpy) + } + l.mu.Unlock() + return !found +} + +// Clear the value at key from the cache, if it exists +func (l *StudioLoader) Clear(key int) { + l.mu.Lock() + delete(l.cache, key) + l.mu.Unlock() +} + +func (l *StudioLoader) unsafeSet(key int, value *models.Studio) { + if l.cache == nil { + l.cache = map[int]*models.Studio{} + } + l.cache[key] = value +} + +// keyIndex will return the location of the key in the batch, if its not found +// it will add the key to the batch +func (b *studioLoaderBatch) keyIndex(l *StudioLoader, key int) int { + for i, existingKey := range b.keys { + if key == existingKey { + return i + } + } + + pos := len(b.keys) + b.keys = append(b.keys, key) + if pos == 0 { + go b.startTimer(l) + } + + if l.maxBatch != 0 && pos >= l.maxBatch-1 { + if !b.closing { + b.closing = true + l.batch = nil + go b.end(l) + } + } + + return pos +} + +func (b *studioLoaderBatch) startTimer(l *StudioLoader) { + time.Sleep(l.wait) + l.mu.Lock() + + // we must have hit a batch limit and are already finalizing this batch + if b.closing { + l.mu.Unlock() + return + } + + l.batch = nil + l.mu.Unlock() + + b.end(l) +} + +func (b *studioLoaderBatch) end(l *StudioLoader) { + b.data, b.error = l.fetch(b.keys) + close(b.done) +} diff --git a/internal/api/loaders/tagloader_gen.go b/internal/api/loaders/tagloader_gen.go new file mode 100644 index 000000000..45d70f639 --- /dev/null +++ b/internal/api/loaders/tagloader_gen.go @@ -0,0 +1,224 @@ +// Code generated by github.com/vektah/dataloaden, DO NOT EDIT. + +package loaders + +import ( + "sync" + "time" + + "github.com/stashapp/stash/pkg/models" +) + +// TagLoaderConfig captures the config to create a new TagLoader +type TagLoaderConfig struct { + // Fetch is a method that provides the data for the loader + Fetch func(keys []int) ([]*models.Tag, []error) + + // Wait is how long wait before sending a batch + Wait time.Duration + + // MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit + MaxBatch int +} + +// NewTagLoader creates a new TagLoader given a fetch, wait, and maxBatch +func NewTagLoader(config TagLoaderConfig) *TagLoader { + return &TagLoader{ + fetch: config.Fetch, + wait: config.Wait, + maxBatch: config.MaxBatch, + } +} + +// TagLoader batches and caches requests +type TagLoader struct { + // this method provides the data for the loader + fetch func(keys []int) ([]*models.Tag, []error) + + // how long to done before sending a batch + wait time.Duration + + // this will limit the maximum number of keys to send in one batch, 0 = no limit + maxBatch int + + // INTERNAL + + // lazily created cache + cache map[int]*models.Tag + + // the current batch. keys will continue to be collected until timeout is hit, + // then everything will be sent to the fetch method and out to the listeners + batch *tagLoaderBatch + + // mutex to prevent races + mu sync.Mutex +} + +type tagLoaderBatch struct { + keys []int + data []*models.Tag + error []error + closing bool + done chan struct{} +} + +// Load a Tag by key, batching and caching will be applied automatically +func (l *TagLoader) Load(key int) (*models.Tag, error) { + return l.LoadThunk(key)() +} + +// LoadThunk returns a function that when called will block waiting for a Tag. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. 
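+// Prime and Clear (defined below) can pre-seed or refresh the cache; a
+// sketch (tag is a hypothetical *models.Tag already fetched by other means):
+//
+//     loader.Prime(tag.ID, tag) // returns false if the key was already cached
+//     loader.Clear(tag.ID)      // evict, so the next Prime or Load takes effect
+//     loader.Prime(tag.ID, tag)
+//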
+func (l *TagLoader) LoadThunk(key int) func() (*models.Tag, error) { + l.mu.Lock() + if it, ok := l.cache[key]; ok { + l.mu.Unlock() + return func() (*models.Tag, error) { + return it, nil + } + } + if l.batch == nil { + l.batch = &tagLoaderBatch{done: make(chan struct{})} + } + batch := l.batch + pos := batch.keyIndex(l, key) + l.mu.Unlock() + + return func() (*models.Tag, error) { + <-batch.done + + var data *models.Tag + if pos < len(batch.data) { + data = batch.data[pos] + } + + var err error + // its convenient to be able to return a single error for everything + if len(batch.error) == 1 { + err = batch.error[0] + } else if batch.error != nil { + err = batch.error[pos] + } + + if err == nil { + l.mu.Lock() + l.unsafeSet(key, data) + l.mu.Unlock() + } + + return data, err + } +} + +// LoadAll fetches many keys at once. It will be broken into appropriate sized +// sub batches depending on how the loader is configured +func (l *TagLoader) LoadAll(keys []int) ([]*models.Tag, []error) { + results := make([]func() (*models.Tag, error), len(keys)) + + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + + tags := make([]*models.Tag, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + tags[i], errors[i] = thunk() + } + return tags, errors +} + +// LoadAllThunk returns a function that when called will block waiting for a Tags. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *TagLoader) LoadAllThunk(keys []int) func() ([]*models.Tag, []error) { + results := make([]func() (*models.Tag, error), len(keys)) + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + return func() ([]*models.Tag, []error) { + tags := make([]*models.Tag, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + tags[i], errors[i] = thunk() + } + return tags, errors + } +} + +// Prime the cache with the provided key and value. If the key already exists, no change is made +// and false is returned. +// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).) +func (l *TagLoader) Prime(key int, value *models.Tag) bool { + l.mu.Lock() + var found bool + if _, found = l.cache[key]; !found { + // make a copy when writing to the cache, its easy to pass a pointer in from a loop var + // and end up with the whole cache pointing to the same value. 
+ cpy := *value + l.unsafeSet(key, &cpy) + } + l.mu.Unlock() + return !found +} + +// Clear the value at key from the cache, if it exists +func (l *TagLoader) Clear(key int) { + l.mu.Lock() + delete(l.cache, key) + l.mu.Unlock() +} + +func (l *TagLoader) unsafeSet(key int, value *models.Tag) { + if l.cache == nil { + l.cache = map[int]*models.Tag{} + } + l.cache[key] = value +} + +// keyIndex will return the location of the key in the batch, if its not found +// it will add the key to the batch +func (b *tagLoaderBatch) keyIndex(l *TagLoader, key int) int { + for i, existingKey := range b.keys { + if key == existingKey { + return i + } + } + + pos := len(b.keys) + b.keys = append(b.keys, key) + if pos == 0 { + go b.startTimer(l) + } + + if l.maxBatch != 0 && pos >= l.maxBatch-1 { + if !b.closing { + b.closing = true + l.batch = nil + go b.end(l) + } + } + + return pos +} + +func (b *tagLoaderBatch) startTimer(l *TagLoader) { + time.Sleep(l.wait) + l.mu.Lock() + + // we must have hit a batch limit and are already finalizing this batch + if b.closing { + l.mu.Unlock() + return + } + + l.batch = nil + l.mu.Unlock() + + b.end(l) +} + +func (b *tagLoaderBatch) end(l *TagLoader) { + b.data, b.error = l.fetch(b.keys) + close(b.done) +} diff --git a/internal/api/resolver.go b/internal/api/resolver.go index 3dbdd9fa0..5db47c3b9 100644 --- a/internal/api/resolver.go +++ b/internal/api/resolver.go @@ -11,6 +11,7 @@ import ( "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/plugin" "github.com/stashapp/stash/pkg/scraper" + "github.com/stashapp/stash/pkg/txn" ) var ( @@ -30,7 +31,12 @@ type hookExecutor interface { } type Resolver struct { - txnManager models.TransactionManager + txnManager txn.Manager + repository manager.Repository + sceneService manager.SceneService + imageService manager.ImageService + galleryService manager.GalleryService + hookExecutor hookExecutor } @@ -38,37 +44,37 @@ func (r *Resolver) scraperCache() *scraper.Cache { return manager.GetInstance().ScraperCache } -func (r *Resolver) Gallery() models.GalleryResolver { +func (r *Resolver) Gallery() GalleryResolver { return &galleryResolver{r} } -func (r *Resolver) Mutation() models.MutationResolver { +func (r *Resolver) Mutation() MutationResolver { return &mutationResolver{r} } -func (r *Resolver) Performer() models.PerformerResolver { +func (r *Resolver) Performer() PerformerResolver { return &performerResolver{r} } -func (r *Resolver) Query() models.QueryResolver { +func (r *Resolver) Query() QueryResolver { return &queryResolver{r} } -func (r *Resolver) Scene() models.SceneResolver { +func (r *Resolver) Scene() SceneResolver { return &sceneResolver{r} } -func (r *Resolver) Image() models.ImageResolver { +func (r *Resolver) Image() ImageResolver { return &imageResolver{r} } -func (r *Resolver) SceneMarker() models.SceneMarkerResolver { +func (r *Resolver) SceneMarker() SceneMarkerResolver { return &sceneMarkerResolver{r} } -func (r *Resolver) Studio() models.StudioResolver { +func (r *Resolver) Studio() StudioResolver { return &studioResolver{r} } -func (r *Resolver) Movie() models.MovieResolver { +func (r *Resolver) Movie() MovieResolver { return &movieResolver{r} } -func (r *Resolver) Subscription() models.SubscriptionResolver { +func (r *Resolver) Subscription() SubscriptionResolver { return &subscriptionResolver{r} } -func (r *Resolver) Tag() models.TagResolver { +func (r *Resolver) Tag() TagResolver { return &tagResolver{r} } @@ -85,17 +91,13 @@ type studioResolver struct{ *Resolver } type movieResolver 
struct{ *Resolver } type tagResolver struct{ *Resolver } -func (r *Resolver) withTxn(ctx context.Context, fn func(r models.Repository) error) error { - return r.txnManager.WithTxn(ctx, fn) -} - -func (r *Resolver) withReadTxn(ctx context.Context, fn func(r models.ReaderRepository) error) error { - return r.txnManager.WithReadTxn(ctx, fn) +func (r *Resolver) withTxn(ctx context.Context, fn func(ctx context.Context) error) error { + return txn.WithTxn(ctx, r.txnManager, fn) } func (r *queryResolver) MarkerWall(ctx context.Context, q *string) (ret []*models.SceneMarker, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.SceneMarker().Wall(q) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.SceneMarker.Wall(ctx, q) return err }); err != nil { return nil, err @@ -104,8 +106,8 @@ func (r *queryResolver) MarkerWall(ctx context.Context, q *string) (ret []*model } func (r *queryResolver) SceneWall(ctx context.Context, q *string) (ret []*models.Scene, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Scene().Wall(q) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Scene.Wall(ctx, q) return err }); err != nil { return nil, err @@ -115,8 +117,8 @@ func (r *queryResolver) SceneWall(ctx context.Context, q *string) (ret []*models } func (r *queryResolver) MarkerStrings(ctx context.Context, q *string, sort *string) (ret []*models.MarkerStringsResultType, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.SceneMarker().GetMarkerStrings(q, sort) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.SceneMarker.GetMarkerStrings(ctx, q, sort) return err }); err != nil { return nil, err @@ -125,28 +127,29 @@ func (r *queryResolver) MarkerStrings(ctx context.Context, q *string, sort *stri return ret, nil } -func (r *queryResolver) Stats(ctx context.Context) (*models.StatsResultType, error) { - var ret models.StatsResultType - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - scenesQB := repo.Scene() - imageQB := repo.Image() - galleryQB := repo.Gallery() - studiosQB := repo.Studio() - performersQB := repo.Performer() - moviesQB := repo.Movie() - tagsQB := repo.Tag() - scenesCount, _ := scenesQB.Count() - scenesSize, _ := scenesQB.Size() - scenesDuration, _ := scenesQB.Duration() - imageCount, _ := imageQB.Count() - imageSize, _ := imageQB.Size() - galleryCount, _ := galleryQB.Count() - performersCount, _ := performersQB.Count() - studiosCount, _ := studiosQB.Count() - moviesCount, _ := moviesQB.Count() - tagsCount, _ := tagsQB.Count() +func (r *queryResolver) Stats(ctx context.Context) (*StatsResultType, error) { + var ret StatsResultType + if err := r.withTxn(ctx, func(ctx context.Context) error { + repo := r.repository + scenesQB := repo.Scene + imageQB := repo.Image + galleryQB := repo.Gallery + studiosQB := repo.Studio + performersQB := repo.Performer + moviesQB := repo.Movie + tagsQB := repo.Tag + scenesCount, _ := scenesQB.Count(ctx) + scenesSize, _ := scenesQB.Size(ctx) + scenesDuration, _ := scenesQB.Duration(ctx) + imageCount, _ := imageQB.Count(ctx) + imageSize, _ := imageQB.Size(ctx) + galleryCount, _ := galleryQB.Count(ctx) + performersCount, _ := performersQB.Count(ctx) + studiosCount, _ := studiosQB.Count(ctx) + moviesCount, _ := moviesQB.Count(ctx) + tagsCount, _ := tagsQB.Count(ctx) - ret = 
models.StatsResultType{ + ret = StatsResultType{ SceneCount: scenesCount, ScenesSize: scenesSize, ScenesDuration: scenesDuration, @@ -167,10 +170,10 @@ func (r *queryResolver) Stats(ctx context.Context) (*models.StatsResultType, err return &ret, nil } -func (r *queryResolver) Version(ctx context.Context) (*models.Version, error) { +func (r *queryResolver) Version(ctx context.Context) (*Version, error) { version, hash, buildtime := GetVersion() - return &models.Version{ + return &Version{ Version: &version, Hash: hash, BuildTime: buildtime, @@ -178,7 +181,7 @@ func (r *queryResolver) Version(ctx context.Context) (*models.Version, error) { } // Latestversion returns the latest git shorthash commit. -func (r *queryResolver) Latestversion(ctx context.Context) (*models.ShortVersion, error) { +func (r *queryResolver) Latestversion(ctx context.Context) (*ShortVersion, error) { ver, url, err := GetLatestVersion(ctx, true) if err == nil { logger.Infof("Retrieved latest hash: %s", ver) @@ -186,37 +189,37 @@ func (r *queryResolver) Latestversion(ctx context.Context) (*models.ShortVersion logger.Errorf("Error while retrieving latest hash: %s", err) } - return &models.ShortVersion{ + return &ShortVersion{ Shorthash: ver, URL: url, }, err } // Get scene marker tags which show up under the video. -func (r *queryResolver) SceneMarkerTags(ctx context.Context, scene_id string) ([]*models.SceneMarkerTag, error) { +func (r *queryResolver) SceneMarkerTags(ctx context.Context, scene_id string) ([]*SceneMarkerTag, error) { sceneID, err := strconv.Atoi(scene_id) if err != nil { return nil, err } var keys []int - tags := make(map[int]*models.SceneMarkerTag) + tags := make(map[int]*SceneMarkerTag) - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - sceneMarkers, err := repo.SceneMarker().FindBySceneID(sceneID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + sceneMarkers, err := r.repository.SceneMarker.FindBySceneID(ctx, sceneID) if err != nil { return err } - tqb := repo.Tag() + tqb := r.repository.Tag for _, sceneMarker := range sceneMarkers { - markerPrimaryTag, err := tqb.Find(sceneMarker.PrimaryTagID) + markerPrimaryTag, err := tqb.Find(ctx, sceneMarker.PrimaryTagID) if err != nil { return err } _, hasKey := tags[markerPrimaryTag.ID] if !hasKey { - sceneMarkerTag := &models.SceneMarkerTag{Tag: markerPrimaryTag} + sceneMarkerTag := &SceneMarkerTag{Tag: markerPrimaryTag} tags[markerPrimaryTag.ID] = sceneMarkerTag keys = append(keys, markerPrimaryTag.ID) } @@ -235,10 +238,20 @@ func (r *queryResolver) SceneMarkerTags(ctx context.Context, scene_id string) ([ return a.SceneMarkers[0].Seconds < b.SceneMarkers[0].Seconds }) - var result []*models.SceneMarkerTag + var result []*SceneMarkerTag for _, key := range keys { result = append(result, tags[key]) } return result, nil } + +func firstError(errs []error) error { + for _, e := range errs { + if e != nil { + return e + } + } + + return nil +} diff --git a/internal/api/resolver_model_gallery.go b/internal/api/resolver_model_gallery.go index 911cb8fe1..03d267a16 100644 --- a/internal/api/resolver_model_gallery.go +++ b/internal/api/resolver_model_gallery.go @@ -2,34 +2,134 @@ package api import ( "context" + "strconv" "time" + "github.com/stashapp/stash/internal/api/loaders" + + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/image" "github.com/stashapp/stash/pkg/models" - "github.com/stashapp/stash/pkg/utils" ) -func (r *galleryResolver) Path(ctx context.Context, obj *models.Gallery) (*string, error) { - if 
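
firstError, defined above, exists because LoadAll on a dataloaden loader returns parallel slices: one value and one error per requested key. The resolvers below return (values, firstError(errs)), surfacing only the first failure in key order. In isolation:

package main

import (
	"errors"
	"fmt"
)

func firstError(errs []error) error {
	for _, e := range errs {
		if e != nil {
			return e
		}
	}
	return nil
}

func main() {
	// LoadAll-style parallel results for keys [1 2 3].
	values := []string{"tag-1", "", "tag-3"}
	errs := []error{nil, errors.New("tag 2 not found"), nil}

	fmt.Println(values, firstError(errs))
	// [tag-1  tag-3] tag 2 not found
}
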
obj.Path.Valid { - return &obj.Path.String, nil +func (r *galleryResolver) getPrimaryFile(ctx context.Context, obj *models.Gallery) (file.File, error) { + if obj.PrimaryFileID != nil { + f, err := loaders.From(ctx).FileByID.Load(*obj.PrimaryFileID) + if err != nil { + return nil, err + } + + return f, nil } + return nil, nil } -func (r *galleryResolver) Title(ctx context.Context, obj *models.Gallery) (*string, error) { - if obj.Title.Valid { - return &obj.Title.String, nil +func (r *galleryResolver) getFiles(ctx context.Context, obj *models.Gallery) ([]file.File, error) { + fileIDs, err := loaders.From(ctx).GalleryFiles.Load(obj.ID) + if err != nil { + return nil, err } + + files, errs := loaders.From(ctx).FileByID.LoadAll(fileIDs) + return files, firstError(errs) +} + +func (r *galleryResolver) Files(ctx context.Context, obj *models.Gallery) ([]*GalleryFile, error) { + files, err := r.getFiles(ctx, obj) + if err != nil { + return nil, err + } + + ret := make([]*GalleryFile, len(files)) + + for i, f := range files { + base := f.Base() + ret[i] = &GalleryFile{ + ID: strconv.Itoa(int(base.ID)), + Path: base.Path, + Basename: base.Basename, + ParentFolderID: strconv.Itoa(int(base.ParentFolderID)), + ModTime: base.ModTime, + Size: base.Size, + CreatedAt: base.CreatedAt, + UpdatedAt: base.UpdatedAt, + Fingerprints: resolveFingerprints(base), + } + + if base.ZipFileID != nil { + zipFileID := strconv.Itoa(int(*base.ZipFileID)) + ret[i].ZipFileID = &zipFileID + } + } + + return ret, nil +} + +func (r *galleryResolver) Folder(ctx context.Context, obj *models.Gallery) (*Folder, error) { + if obj.FolderID == nil { + return nil, nil + } + + var ret *file.Folder + + if err := r.withTxn(ctx, func(ctx context.Context) error { + var err error + + ret, err = r.repository.Folder.Find(ctx, *obj.FolderID) + if err != nil { + return err + } + + return err + }); err != nil { + return nil, err + } + + if ret == nil { + return nil, nil + } + + rr := &Folder{ + ID: ret.ID.String(), + Path: ret.Path, + ModTime: ret.ModTime, + CreatedAt: ret.CreatedAt, + UpdatedAt: ret.UpdatedAt, + } + + if ret.ParentFolderID != nil { + pfidStr := ret.ParentFolderID.String() + rr.ParentFolderID = &pfidStr + } + + if ret.ZipFileID != nil { + zfidStr := ret.ZipFileID.String() + rr.ZipFileID = &zfidStr + } + + return rr, nil +} + +func (r *galleryResolver) FileModTime(ctx context.Context, obj *models.Gallery) (*time.Time, error) { + f, err := r.getPrimaryFile(ctx, obj) + if err != nil { + return nil, err + } + if f != nil { + return &f.Base().ModTime, nil + } + return nil, nil } func (r *galleryResolver) Images(ctx context.Context, obj *models.Gallery) (ret []*models.Image, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { + if err := r.withTxn(ctx, func(ctx context.Context) error { var err error // #2376 - sort images by path // doing this via Query is really slow, so stick with FindByGalleryID - ret, err = repo.Image().FindByGalleryID(obj.ID) + ret, err = r.repository.Image.FindByGalleryID(ctx, obj.ID) if err != nil { return err } @@ -43,9 +143,9 @@ func (r *galleryResolver) Images(ctx context.Context, obj *models.Gallery) (ret } func (r *galleryResolver) Cover(ctx context.Context, obj *models.Gallery) (ret *models.Image, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { + if err := r.withTxn(ctx, func(ctx context.Context) error { // doing this via Query is really slow, so stick with FindByGalleryID - imgs, err := repo.Image().FindByGalleryID(obj.ID) + imgs, 
err := r.repository.Image.FindByGalleryID(ctx, obj.ID) if err != nil { return err } @@ -70,91 +170,67 @@ func (r *galleryResolver) Cover(ctx context.Context, obj *models.Gallery) (ret * } func (r *galleryResolver) Date(ctx context.Context, obj *models.Gallery) (*string, error) { - if obj.Date.Valid { - result := utils.GetYMDFromDatabaseDate(obj.Date.String) + if obj.Date != nil { + result := obj.Date.String() return &result, nil } return nil, nil } -func (r *galleryResolver) URL(ctx context.Context, obj *models.Gallery) (*string, error) { - if obj.URL.Valid { - return &obj.URL.String, nil - } - return nil, nil -} - -func (r *galleryResolver) Details(ctx context.Context, obj *models.Gallery) (*string, error) { - if obj.Details.Valid { - return &obj.Details.String, nil - } - return nil, nil -} - -func (r *galleryResolver) Rating(ctx context.Context, obj *models.Gallery) (*int, error) { - if obj.Rating.Valid { - rating := int(obj.Rating.Int64) - return &rating, nil - } - return nil, nil -} - func (r *galleryResolver) Scenes(ctx context.Context, obj *models.Gallery) (ret []*models.Scene, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - var err error - ret, err = repo.Scene().FindByGalleryID(obj.ID) - return err - }); err != nil { - return nil, err + if !obj.SceneIDs.Loaded() { + if err := r.withTxn(ctx, func(ctx context.Context) error { + return obj.LoadSceneIDs(ctx, r.repository.Gallery) + }); err != nil { + return nil, err + } } - return ret, nil + var errs []error + ret, errs = loaders.From(ctx).SceneByID.LoadAll(obj.SceneIDs.List()) + return ret, firstError(errs) } func (r *galleryResolver) Studio(ctx context.Context, obj *models.Gallery) (ret *models.Studio, err error) { - if !obj.StudioID.Valid { + if obj.StudioID == nil { return nil, nil } - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - var err error - ret, err = repo.Studio().Find(int(obj.StudioID.Int64)) - return err - }); err != nil { - return nil, err - } - - return ret, nil + return loaders.From(ctx).StudioByID.Load(*obj.StudioID) } func (r *galleryResolver) Tags(ctx context.Context, obj *models.Gallery) (ret []*models.Tag, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - var err error - ret, err = repo.Tag().FindByGalleryID(obj.ID) - return err - }); err != nil { - return nil, err + if !obj.TagIDs.Loaded() { + if err := r.withTxn(ctx, func(ctx context.Context) error { + return obj.LoadTagIDs(ctx, r.repository.Gallery) + }); err != nil { + return nil, err + } } - return ret, nil + var errs []error + ret, errs = loaders.From(ctx).TagByID.LoadAll(obj.TagIDs.List()) + return ret, firstError(errs) } func (r *galleryResolver) Performers(ctx context.Context, obj *models.Gallery) (ret []*models.Performer, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - var err error - ret, err = repo.Performer().FindByGalleryID(obj.ID) - return err - }); err != nil { - return nil, err + if !obj.PerformerIDs.Loaded() { + if err := r.withTxn(ctx, func(ctx context.Context) error { + return obj.LoadPerformerIDs(ctx, r.repository.Gallery) + }); err != nil { + return nil, err + } } - return ret, nil + var errs []error + ret, errs = loaders.From(ctx).PerformerByID.LoadAll(obj.PerformerIDs.List()) + return ret, firstError(errs) } func (r *galleryResolver) ImageCount(ctx context.Context, obj *models.Gallery) (ret int, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { + if err := 
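
The Scenes, Tags and Performers resolvers above all share one two-step shape, and the same pattern repeats for images and scenes later in this diff: hydrate the ID list onto the model only if the originating query did not load it, then resolve the IDs through the request-scoped loader, so a page of N galleries costs one batched query per association type instead of N. With the steps called out:

func (r *galleryResolver) Tags(ctx context.Context, obj *models.Gallery) ([]*models.Tag, error) {
	// Step 1: lazily hydrate the ID list. Loaded() is false when the
	// gallery was fetched without its association joins.
	if !obj.TagIDs.Loaded() {
		if err := r.withTxn(ctx, func(ctx context.Context) error {
			return obj.LoadTagIDs(ctx, r.repository.Gallery)
		}); err != nil {
			return nil, err
		}
	}

	// Step 2: resolve the IDs via the per-request loader; sibling
	// galleries resolving concurrently share a single batched fetch.
	tags, errs := loaders.From(ctx).TagByID.LoadAll(obj.TagIDs.List())
	return tags, firstError(errs)
}
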
r.withTxn(ctx, func(ctx context.Context) error { var err error - ret, err = repo.Image().CountByGalleryID(obj.ID) + ret, err = r.repository.Image.CountByGalleryID(ctx, obj.ID) return err }); err != nil { return 0, err @@ -162,15 +238,3 @@ func (r *galleryResolver) ImageCount(ctx context.Context, obj *models.Gallery) ( return ret, nil } - -func (r *galleryResolver) CreatedAt(ctx context.Context, obj *models.Gallery) (*time.Time, error) { - return &obj.CreatedAt.Timestamp, nil -} - -func (r *galleryResolver) UpdatedAt(ctx context.Context, obj *models.Gallery) (*time.Time, error) { - return &obj.UpdatedAt.Timestamp, nil -} - -func (r *galleryResolver) FileModTime(ctx context.Context, obj *models.Gallery) (*time.Time, error) { - return &obj.FileModTime.Timestamp, nil -} diff --git a/internal/api/resolver_model_image.go b/internal/api/resolver_model_image.go index a77437dfb..136a46622 100644 --- a/internal/api/resolver_model_image.go +++ b/internal/api/resolver_model_image.go @@ -2,105 +2,180 @@ package api import ( "context" + "fmt" + "strconv" "time" + "github.com/stashapp/stash/internal/api/loaders" "github.com/stashapp/stash/internal/api/urlbuilders" - "github.com/stashapp/stash/pkg/image" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/models" ) -func (r *imageResolver) Title(ctx context.Context, obj *models.Image) (*string, error) { - ret := image.GetTitle(obj) - return &ret, nil -} +func (r *imageResolver) getPrimaryFile(ctx context.Context, obj *models.Image) (*file.ImageFile, error) { + if obj.PrimaryFileID != nil { + f, err := loaders.From(ctx).FileByID.Load(*obj.PrimaryFileID) + if err != nil { + return nil, err + } -func (r *imageResolver) Rating(ctx context.Context, obj *models.Image) (*int, error) { - if obj.Rating.Valid { - rating := int(obj.Rating.Int64) - return &rating, nil + ret, ok := f.(*file.ImageFile) + if !ok { + return nil, fmt.Errorf("file %T is not an image file", f) + } + + return ret, nil } + return nil, nil } -func (r *imageResolver) File(ctx context.Context, obj *models.Image) (*models.ImageFileType, error) { - width := int(obj.Width.Int64) - height := int(obj.Height.Int64) - size := int(obj.Size.Int64) - return &models.ImageFileType{ - Size: &size, - Width: &width, - Height: &height, +func (r *imageResolver) getFiles(ctx context.Context, obj *models.Image) ([]*file.ImageFile, error) { + fileIDs, err := loaders.From(ctx).ImageFiles.Load(obj.ID) + if err != nil { + return nil, err + } + + files, errs := loaders.From(ctx).FileByID.LoadAll(fileIDs) + ret := make([]*file.ImageFile, len(files)) + for i, bf := range files { + f, ok := bf.(*file.ImageFile) + if !ok { + return nil, fmt.Errorf("file %T is not an image file", f) + } + + ret[i] = f + } + + return ret, firstError(errs) +} + +func (r *imageResolver) Title(ctx context.Context, obj *models.Image) (*string, error) { + ret := obj.GetTitle() + return &ret, nil +} + +func (r *imageResolver) File(ctx context.Context, obj *models.Image) (*ImageFileType, error) { + f, err := r.getPrimaryFile(ctx, obj) + if err != nil { + return nil, err + } + + if f == nil { + return nil, nil + } + + width := f.Width + height := f.Height + size := f.Size + return &ImageFileType{ + Size: int(size), + Width: width, + Height: height, }, nil } -func (r *imageResolver) Paths(ctx context.Context, obj *models.Image) (*models.ImagePathsType, error) { +func (r *imageResolver) Files(ctx context.Context, obj *models.Image) ([]*ImageFile, error) { + files, err := r.getFiles(ctx, obj) + if err != nil { + return nil, err + } + 
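
getPrimaryFile and getFiles above narrow the file.File interface down to the concrete *file.ImageFile. A minimal standalone illustration of that narrowing (the types here are hypothetical stand-ins, not pkg/file):

package main

import "fmt"

// File stands in for the pkg/file interface; ImageFile and friends
// are the concrete implementations behind it.
type File interface{ Path() string }

type BaseFile struct{ path string }

func (b *BaseFile) Path() string { return b.path }

type ImageFile struct {
	BaseFile
	Width, Height int
}

func narrow(f File) (*ImageFile, error) {
	img, ok := f.(*ImageFile)
	if !ok {
		// %T on the original interface value reports the actual concrete
		// type, which is what makes this message useful for debugging
		return nil, fmt.Errorf("file %T is not an image file", f)
	}
	return img, nil
}

func main() {
	var f File = &ImageFile{BaseFile{"a.jpg"}, 800, 600}
	img, err := narrow(f)
	fmt.Println(img.Width, img.Height, err) // 800 600 <nil>
}
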
+ ret := make([]*ImageFile, len(files)) + + for i, f := range files { + ret[i] = &ImageFile{ + ID: strconv.Itoa(int(f.ID)), + Path: f.Path, + Basename: f.Basename, + ParentFolderID: strconv.Itoa(int(f.ParentFolderID)), + ModTime: f.ModTime, + Size: f.Size, + Width: f.Width, + Height: f.Height, + CreatedAt: f.CreatedAt, + UpdatedAt: f.UpdatedAt, + Fingerprints: resolveFingerprints(f.Base()), + } + + if f.ZipFileID != nil { + zipFileID := strconv.Itoa(int(*f.ZipFileID)) + ret[i].ZipFileID = &zipFileID + } + } + + return ret, nil +} + +func (r *imageResolver) FileModTime(ctx context.Context, obj *models.Image) (*time.Time, error) { + f, err := r.getPrimaryFile(ctx, obj) + if err != nil { + return nil, err + } + if f != nil { + return &f.ModTime, nil + } + + return nil, nil +} + +func (r *imageResolver) Paths(ctx context.Context, obj *models.Image) (*ImagePathsType, error) { baseURL, _ := ctx.Value(BaseURLCtxKey).(string) builder := urlbuilders.NewImageURLBuilder(baseURL, obj) thumbnailPath := builder.GetThumbnailURL() imagePath := builder.GetImageURL() - return &models.ImagePathsType{ + return &ImagePathsType{ Image: &imagePath, Thumbnail: &thumbnailPath, }, nil } func (r *imageResolver) Galleries(ctx context.Context, obj *models.Image) (ret []*models.Gallery, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - var err error - ret, err = repo.Gallery().FindByImageID(obj.ID) - return err - }); err != nil { - return nil, err + if !obj.GalleryIDs.Loaded() { + if err := r.withTxn(ctx, func(ctx context.Context) error { + return obj.LoadGalleryIDs(ctx, r.repository.Image) + }); err != nil { + return nil, err + } } - return ret, nil + var errs []error + ret, errs = loaders.From(ctx).GalleryByID.LoadAll(obj.GalleryIDs.List()) + return ret, firstError(errs) } func (r *imageResolver) Studio(ctx context.Context, obj *models.Image) (ret *models.Studio, err error) { - if !obj.StudioID.Valid { + if obj.StudioID == nil { return nil, nil } - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Studio().Find(int(obj.StudioID.Int64)) - return err - }); err != nil { - return nil, err - } - - return ret, nil + return loaders.From(ctx).StudioByID.Load(*obj.StudioID) } func (r *imageResolver) Tags(ctx context.Context, obj *models.Image) (ret []*models.Tag, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Tag().FindByImageID(obj.ID) - return err - }); err != nil { - return nil, err + if !obj.TagIDs.Loaded() { + if err := r.withTxn(ctx, func(ctx context.Context) error { + return obj.LoadTagIDs(ctx, r.repository.Image) + }); err != nil { + return nil, err + } } - return ret, nil + var errs []error + ret, errs = loaders.From(ctx).TagByID.LoadAll(obj.TagIDs.List()) + return ret, firstError(errs) } func (r *imageResolver) Performers(ctx context.Context, obj *models.Image) (ret []*models.Performer, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Performer().FindByImageID(obj.ID) - return err - }); err != nil { - return nil, err + if !obj.PerformerIDs.Loaded() { + if err := r.withTxn(ctx, func(ctx context.Context) error { + return obj.LoadPerformerIDs(ctx, r.repository.Image) + }); err != nil { + return nil, err + } } - return ret, nil -} - -func (r *imageResolver) CreatedAt(ctx context.Context, obj *models.Image) (*time.Time, error) { - return &obj.CreatedAt.Timestamp, nil -} - -func (r *imageResolver) UpdatedAt(ctx context.Context, obj 
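
loaders.From(ctx) implies middleware that installs a fresh loader set into each request's context; that is what scopes the batching and caching to a single GraphQL request. A plausible shape for that middleware (illustrative; the actual internal/api/loaders package is generated and wired elsewhere in this change):

package loaders

import (
	"context"
	"net/http"
)

type contextKey struct{}

// Loaders would hold one dataloader per entity type,
// e.g. TagByID, StudioByID, FileByID, SceneFiles, ...
type Loaders struct {
	// TagByID *TagLoader
	// ...
}

// Middleware gives every request its own Loaders value. The per-request
// lifetime matters: a longer-lived cache would serve stale rows after
// mutations.
func Middleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := context.WithValue(r.Context(), contextKey{}, &Loaders{})
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}

// From retrieves the request's loader set.
func From(ctx context.Context) *Loaders {
	return ctx.Value(contextKey{}).(*Loaders)
}
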
*models.Image) (*time.Time, error) { - return &obj.UpdatedAt.Timestamp, nil -} - -func (r *imageResolver) FileModTime(ctx context.Context, obj *models.Image) (*time.Time, error) { - return &obj.FileModTime.Timestamp, nil + var errs []error + ret, errs = loaders.From(ctx).PerformerByID.LoadAll(obj.PerformerIDs.List()) + return ret, firstError(errs) } diff --git a/internal/api/resolver_model_movie.go b/internal/api/resolver_model_movie.go index 57e979828..e587e3ba5 100644 --- a/internal/api/resolver_model_movie.go +++ b/internal/api/resolver_model_movie.go @@ -4,6 +4,7 @@ import ( "context" "time" + "github.com/stashapp/stash/internal/api/loaders" "github.com/stashapp/stash/internal/api/urlbuilders" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/utils" @@ -56,14 +57,7 @@ func (r *movieResolver) Rating(ctx context.Context, obj *models.Movie) (*int, er func (r *movieResolver) Studio(ctx context.Context, obj *models.Movie) (ret *models.Studio, err error) { if obj.StudioID.Valid { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Studio().Find(int(obj.StudioID.Int64)) - return err - }); err != nil { - return nil, err - } - - return ret, nil + return loaders.From(ctx).StudioByID.Load(int(obj.StudioID.Int64)) } return nil, nil @@ -92,9 +86,9 @@ func (r *movieResolver) FrontImagePath(ctx context.Context, obj *models.Movie) ( func (r *movieResolver) BackImagePath(ctx context.Context, obj *models.Movie) (*string, error) { // don't return any thing if there is no back image var img []byte - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { + if err := r.withTxn(ctx, func(ctx context.Context) error { var err error - img, err = repo.Movie().GetBackImage(obj.ID) + img, err = r.repository.Movie.GetBackImage(ctx, obj.ID) if err != nil { return err } @@ -115,8 +109,8 @@ func (r *movieResolver) BackImagePath(ctx context.Context, obj *models.Movie) (* func (r *movieResolver) SceneCount(ctx context.Context, obj *models.Movie) (ret *int, err error) { var res int - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - res, err = repo.Scene().CountByMovieID(obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + res, err = r.repository.Scene.CountByMovieID(ctx, obj.ID) return err }); err != nil { return nil, err @@ -126,9 +120,9 @@ func (r *movieResolver) SceneCount(ctx context.Context, obj *models.Movie) (ret } func (r *movieResolver) Scenes(ctx context.Context, obj *models.Movie) (ret []*models.Scene, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { + if err := r.withTxn(ctx, func(ctx context.Context) error { var err error - ret, err = repo.Scene().FindByMovieID(obj.ID) + ret, err = r.repository.Scene.FindByMovieID(ctx, obj.ID) return err }); err != nil { return nil, err diff --git a/internal/api/resolver_model_performer.go b/internal/api/resolver_model_performer.go index 364c346ca..9e66fb38d 100644 --- a/internal/api/resolver_model_performer.go +++ b/internal/api/resolver_model_performer.go @@ -142,8 +142,8 @@ func (r *performerResolver) ImagePath(ctx context.Context, obj *models.Performer } func (r *performerResolver) Tags(ctx context.Context, obj *models.Performer) (ret []*models.Tag, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Tag().FindByPerformerID(obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Tag.FindByPerformerID(ctx, obj.ID) return err 
}); err != nil { return nil, err @@ -154,8 +154,8 @@ func (r *performerResolver) Tags(ctx context.Context, obj *models.Performer) (re func (r *performerResolver) SceneCount(ctx context.Context, obj *models.Performer) (ret *int, err error) { var res int - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - res, err = repo.Scene().CountByPerformerID(obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + res, err = r.repository.Scene.CountByPerformerID(ctx, obj.ID) return err }); err != nil { return nil, err @@ -166,8 +166,8 @@ func (r *performerResolver) SceneCount(ctx context.Context, obj *models.Performe func (r *performerResolver) ImageCount(ctx context.Context, obj *models.Performer) (ret *int, err error) { var res int - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - res, err = image.CountByPerformerID(repo.Image(), obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + res, err = image.CountByPerformerID(ctx, r.repository.Image, obj.ID) return err }); err != nil { return nil, err @@ -178,8 +178,8 @@ func (r *performerResolver) ImageCount(ctx context.Context, obj *models.Performe func (r *performerResolver) GalleryCount(ctx context.Context, obj *models.Performer) (ret *int, err error) { var res int - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - res, err = gallery.CountByPerformerID(repo.Gallery(), obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + res, err = gallery.CountByPerformerID(ctx, r.repository.Gallery, obj.ID) return err }); err != nil { return nil, err @@ -189,8 +189,8 @@ func (r *performerResolver) GalleryCount(ctx context.Context, obj *models.Perfor } func (r *performerResolver) Scenes(ctx context.Context, obj *models.Performer) (ret []*models.Scene, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Scene().FindByPerformerID(obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Scene.FindByPerformerID(ctx, obj.ID) return err }); err != nil { return nil, err @@ -199,15 +199,17 @@ func (r *performerResolver) Scenes(ctx context.Context, obj *models.Performer) ( return ret, nil } -func (r *performerResolver) StashIds(ctx context.Context, obj *models.Performer) (ret []*models.StashID, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Performer().GetStashIDs(obj.ID) +func (r *performerResolver) StashIds(ctx context.Context, obj *models.Performer) ([]*models.StashID, error) { + var ret []models.StashID + if err := r.withTxn(ctx, func(ctx context.Context) error { + var err error + ret, err = r.repository.Performer.GetStashIDs(ctx, obj.ID) return err }); err != nil { return nil, err } - return ret, nil + return stashIDsSliceToPtrSlice(ret), nil } func (r *performerResolver) Rating(ctx context.Context, obj *models.Performer) (*int, error) { @@ -256,8 +258,8 @@ func (r *performerResolver) UpdatedAt(ctx context.Context, obj *models.Performer } func (r *performerResolver) Movies(ctx context.Context, obj *models.Performer) (ret []*models.Movie, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Movie().FindByPerformerID(obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Movie.FindByPerformerID(ctx, obj.ID) return err }); err != nil { return nil, err @@ -268,8 +270,8 @@ func (r *performerResolver) Movies(ctx 
context.Context, obj *models.Performer) (
 func (r *performerResolver) MovieCount(ctx context.Context, obj *models.Performer) (ret *int, err error) {
 	var res int
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
-		res, err = repo.Movie().CountByPerformerID(obj.ID)
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		res, err = r.repository.Movie.CountByPerformerID(ctx, obj.ID)
 		return err
 	}); err != nil {
 		return nil, err
diff --git a/internal/api/resolver_model_scene.go b/internal/api/resolver_model_scene.go
index 4fd583d5b..3be095340 100644
--- a/internal/api/resolver_model_scene.go
+++ b/internal/api/resolver_model_scene.go
@@ -2,95 +2,171 @@ package api

 import (
 	"context"
+	"fmt"
+	"strconv"
 	"time"

+	"github.com/stashapp/stash/internal/api/loaders"
 	"github.com/stashapp/stash/internal/api/urlbuilders"
 	"github.com/stashapp/stash/internal/manager"
+	"github.com/stashapp/stash/pkg/file"
 	"github.com/stashapp/stash/pkg/models"
 	"github.com/stashapp/stash/pkg/utils"
 )

-func (r *sceneResolver) Checksum(ctx context.Context, obj *models.Scene) (*string, error) {
-	if obj.Checksum.Valid {
-		return &obj.Checksum.String, nil
+func (r *sceneResolver) getPrimaryFile(ctx context.Context, obj *models.Scene) (*file.VideoFile, error) {
+	if obj.PrimaryFileID != nil {
+		f, err := loaders.From(ctx).FileByID.Load(*obj.PrimaryFileID)
+		if err != nil {
+			return nil, err
+		}
+
+		ret, ok := f.(*file.VideoFile)
+		if !ok {
+			return nil, fmt.Errorf("file %T is not a video file", f)
+		}
+
+		obj.Files.SetPrimary(ret)
+
+		return ret, nil
 	}
+
 	return nil, nil
 }

-func (r *sceneResolver) Oshash(ctx context.Context, obj *models.Scene) (*string, error) {
-	if obj.OSHash.Valid {
-		return &obj.OSHash.String, nil
+func (r *sceneResolver) getFiles(ctx context.Context, obj *models.Scene) ([]*file.VideoFile, error) {
+	fileIDs, err := loaders.From(ctx).SceneFiles.Load(obj.ID)
+	if err != nil {
+		return nil, err
 	}
-	return nil, nil
+
+	files, errs := loaders.From(ctx).FileByID.LoadAll(fileIDs)
+	ret := make([]*file.VideoFile, len(files))
+	for i, bf := range files {
+		f, ok := bf.(*file.VideoFile)
+		if !ok {
+			return nil, fmt.Errorf("file %T is not a video file", bf)
+		}
+
+		ret[i] = f
+	}
+
+	obj.Files.Set(ret)
+
+	return ret, firstError(errs)
 }

-func (r *sceneResolver) Title(ctx context.Context, obj *models.Scene) (*string, error) {
-	if obj.Title.Valid {
-		return &obj.Title.String, nil
+func (r *sceneResolver) FileModTime(ctx context.Context, obj *models.Scene) (*time.Time, error) {
+	f, err := r.getPrimaryFile(ctx, obj)
+	if err != nil {
+		return nil, err
 	}
-	return nil, nil
-}
-func (r *sceneResolver) Details(ctx context.Context, obj *models.Scene) (*string, error) {
-	if obj.Details.Valid {
-		return &obj.Details.String, nil
-	}
-	return nil, nil
-}
-
-func (r *sceneResolver) URL(ctx context.Context, obj *models.Scene) (*string, error) {
-	if obj.URL.Valid {
-		return &obj.URL.String, nil
+	if f != nil {
+		return &f.ModTime, nil
 	}
 	return nil, nil
 }

 func (r *sceneResolver) Date(ctx context.Context, obj *models.Scene) (*string, error) {
-	if obj.Date.Valid {
-		result := utils.GetYMDFromDatabaseDate(obj.Date.String)
+	if obj.Date != nil {
+		result := obj.Date.String()
 		return &result, nil
 	}
 	return nil, nil
 }

-func (r *sceneResolver) Rating(ctx context.Context, obj *models.Scene) (*int, error) {
-	if obj.Rating.Valid {
-		rating := int(obj.Rating.Int64)
-		return &rating, nil
-	}
-	return nil, nil
-}
-
-func (r *sceneResolver) InteractiveSpeed(ctx context.Context, obj *models.Scene) (*int, error) {
-	if
obj.InteractiveSpeed.Valid { - interactive_speed := int(obj.InteractiveSpeed.Int64) - return &interactive_speed, nil - } - return nil, nil -} - +// File is deprecated func (r *sceneResolver) File(ctx context.Context, obj *models.Scene) (*models.SceneFileType, error) { - width := int(obj.Width.Int64) - height := int(obj.Height.Int64) - bitrate := int(obj.Bitrate.Int64) + f, err := r.getPrimaryFile(ctx, obj) + if err != nil { + return nil, err + } + if f == nil { + return nil, nil + } + + bitrate := int(f.BitRate) + size := strconv.FormatInt(f.Size, 10) + return &models.SceneFileType{ - Size: &obj.Size.String, - Duration: handleFloat64(obj.Duration.Float64), - VideoCodec: &obj.VideoCodec.String, - AudioCodec: &obj.AudioCodec.String, - Width: &width, - Height: &height, - Framerate: handleFloat64(obj.Framerate.Float64), + Size: &size, + Duration: handleFloat64(f.Duration), + VideoCodec: &f.VideoCodec, + AudioCodec: &f.AudioCodec, + Width: &f.Width, + Height: &f.Height, + Framerate: handleFloat64(f.FrameRate), Bitrate: &bitrate, }, nil } -func (r *sceneResolver) Paths(ctx context.Context, obj *models.Scene) (*models.ScenePathsType, error) { +func (r *sceneResolver) Files(ctx context.Context, obj *models.Scene) ([]*VideoFile, error) { + files, err := r.getFiles(ctx, obj) + if err != nil { + return nil, err + } + + ret := make([]*VideoFile, len(files)) + + for i, f := range files { + ret[i] = &VideoFile{ + ID: strconv.Itoa(int(f.ID)), + Path: f.Path, + Basename: f.Basename, + ParentFolderID: strconv.Itoa(int(f.ParentFolderID)), + ModTime: f.ModTime, + Format: f.Format, + Size: f.Size, + Duration: handleFloat64Value(f.Duration), + VideoCodec: f.VideoCodec, + AudioCodec: f.AudioCodec, + Width: f.Width, + Height: f.Height, + FrameRate: handleFloat64Value(f.FrameRate), + BitRate: int(f.BitRate), + CreatedAt: f.CreatedAt, + UpdatedAt: f.UpdatedAt, + Fingerprints: resolveFingerprints(f.Base()), + } + + if f.ZipFileID != nil { + zipFileID := strconv.Itoa(int(*f.ZipFileID)) + ret[i].ZipFileID = &zipFileID + } + } + + return ret, nil +} + +func resolveFingerprints(f *file.BaseFile) []*Fingerprint { + ret := make([]*Fingerprint, len(f.Fingerprints)) + + for i, fp := range f.Fingerprints { + ret[i] = &Fingerprint{ + Type: fp.Type, + Value: formatFingerprint(fp.Fingerprint), + } + } + + return ret +} + +func formatFingerprint(fp interface{}) string { + switch v := fp.(type) { + case int64: + return strconv.FormatUint(uint64(v), 16) + default: + return fmt.Sprintf("%v", fp) + } +} + +func (r *sceneResolver) Paths(ctx context.Context, obj *models.Scene) (*ScenePathsType, error) { baseURL, _ := ctx.Value(BaseURLCtxKey).(string) config := manager.GetInstance().Config builder := urlbuilders.NewSceneURLBuilder(baseURL, obj.ID) builder.APIKey = config.GetAPIKey() - screenshotPath := builder.GetScreenshotURL(obj.UpdatedAt.Timestamp) + screenshotPath := builder.GetScreenshotURL(obj.UpdatedAt) previewPath := builder.GetStreamPreviewURL() streamPath := builder.GetStreamURL() webpPath := builder.GetStreamPreviewImageURL() @@ -101,7 +177,7 @@ func (r *sceneResolver) Paths(ctx context.Context, obj *models.Scene) (*models.S captionBasePath := builder.GetCaptionURL() interactiveHeatmap := builder.GetInteractiveHeatmapURL() - return &models.ScenePathsType{ + return &ScenePathsType{ Screenshot: &screenshotPath, Preview: &previewPath, Stream: &streamPath, @@ -116,8 +192,8 @@ func (r *sceneResolver) Paths(ctx context.Context, obj *models.Scene) (*models.S } func (r *sceneResolver) SceneMarkers(ctx context.Context, obj 
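
formatFingerprint above routes int64 fingerprints (perceptual hashes) through uint64 before hex-formatting, so negative hashes print as a stable 16-digit bit pattern rather than a sign-prefixed value. A worked check:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// A phash is stored as a signed int64 but is really a 64-bit pattern.
	var phash int64 = -0x7ffd3b7f12c45e01

	fmt.Println(strconv.FormatUint(uint64(phash), 16))
	// 8002c480ed3ba1ff: the two's-complement bits in hex, where
	// strconv.FormatInt would have printed "-7ffd3b7f12c45e01"
}
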
*models.Scene) (ret []*models.SceneMarker, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.SceneMarker().FindBySceneID(obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.SceneMarker.FindBySceneID(ctx, obj.ID) return err }); err != nil { return nil, err @@ -126,9 +202,17 @@ func (r *sceneResolver) SceneMarkers(ctx context.Context, obj *models.Scene) (re return ret, nil } -func (r *sceneResolver) Captions(ctx context.Context, obj *models.Scene) (ret []*models.SceneCaption, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Scene().GetCaptions(obj.ID) +func (r *sceneResolver) Captions(ctx context.Context, obj *models.Scene) (ret []*models.VideoCaption, err error) { + primaryFile, err := r.getPrimaryFile(ctx, obj) + if err != nil { + return nil, err + } + if primaryFile == nil { + return nil, nil + } + + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.File.GetCaptions(ctx, primaryFile.Base().ID) return err }); err != nil { return nil, err @@ -138,121 +222,137 @@ func (r *sceneResolver) Captions(ctx context.Context, obj *models.Scene) (ret [] } func (r *sceneResolver) Galleries(ctx context.Context, obj *models.Scene) (ret []*models.Gallery, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Gallery().FindBySceneID(obj.ID) - return err - }); err != nil { - return nil, err + if !obj.GalleryIDs.Loaded() { + if err := r.withTxn(ctx, func(ctx context.Context) error { + return obj.LoadGalleryIDs(ctx, r.repository.Scene) + }); err != nil { + return nil, err + } } - return ret, nil + var errs []error + ret, errs = loaders.From(ctx).GalleryByID.LoadAll(obj.GalleryIDs.List()) + return ret, firstError(errs) } func (r *sceneResolver) Studio(ctx context.Context, obj *models.Scene) (ret *models.Studio, err error) { - if !obj.StudioID.Valid { + if obj.StudioID == nil { return nil, nil } - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Studio().Find(int(obj.StudioID.Int64)) - return err - }); err != nil { - return nil, err - } - - return ret, nil + return loaders.From(ctx).StudioByID.Load(*obj.StudioID) } -func (r *sceneResolver) Movies(ctx context.Context, obj *models.Scene) (ret []*models.SceneMovie, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - qb := repo.Scene() - mqb := repo.Movie() +func (r *sceneResolver) Movies(ctx context.Context, obj *models.Scene) (ret []*SceneMovie, err error) { + if !obj.Movies.Loaded() { + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Scene - sceneMovies, err := qb.GetMovies(obj.ID) - if err != nil { - return err + return obj.LoadMovies(ctx, qb) + }); err != nil { + return nil, err } - - for _, sm := range sceneMovies { - movie, err := mqb.Find(sm.MovieID) - if err != nil { - return err - } - - sceneIdx := sm.SceneIndex - sceneMovie := &models.SceneMovie{ - Movie: movie, - } - - if sceneIdx.Valid { - idx := int(sceneIdx.Int64) - sceneMovie.SceneIndex = &idx - } - - ret = append(ret, sceneMovie) - } - - return nil - }); err != nil { - return nil, err } + + loader := loaders.From(ctx).MovieByID + + for _, sm := range obj.Movies.List() { + movie, err := loader.Load(sm.MovieID) + if err != nil { + return nil, err + } + + sceneIdx := sm.SceneIndex + sceneMovie := &SceneMovie{ + Movie: movie, + SceneIndex: sceneIdx, + } + + 
ret = append(ret, sceneMovie) + } + return ret, nil } func (r *sceneResolver) Tags(ctx context.Context, obj *models.Scene) (ret []*models.Tag, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Tag().FindBySceneID(obj.ID) - return err - }); err != nil { - return nil, err + if !obj.TagIDs.Loaded() { + if err := r.withTxn(ctx, func(ctx context.Context) error { + return obj.LoadTagIDs(ctx, r.repository.Scene) + }); err != nil { + return nil, err + } } - return ret, nil + var errs []error + ret, errs = loaders.From(ctx).TagByID.LoadAll(obj.TagIDs.List()) + return ret, firstError(errs) } func (r *sceneResolver) Performers(ctx context.Context, obj *models.Scene) (ret []*models.Performer, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Performer().FindBySceneID(obj.ID) - return err - }); err != nil { - return nil, err + if !obj.PerformerIDs.Loaded() { + if err := r.withTxn(ctx, func(ctx context.Context) error { + return obj.LoadPerformerIDs(ctx, r.repository.Scene) + }); err != nil { + return nil, err + } } - return ret, nil + var errs []error + ret, errs = loaders.From(ctx).PerformerByID.LoadAll(obj.PerformerIDs.List()) + return ret, firstError(errs) +} + +func stashIDsSliceToPtrSlice(v []models.StashID) []*models.StashID { + ret := make([]*models.StashID, len(v)) + for i, vv := range v { + c := vv + ret[i] = &c + } + + return ret } func (r *sceneResolver) StashIds(ctx context.Context, obj *models.Scene) (ret []*models.StashID, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Scene().GetStashIDs(obj.ID) - return err + if err := r.withTxn(ctx, func(ctx context.Context) error { + return obj.LoadStashIDs(ctx, r.repository.Scene) }); err != nil { return nil, err } - return ret, nil + return stashIDsSliceToPtrSlice(obj.StashIDs.List()), nil } func (r *sceneResolver) Phash(ctx context.Context, obj *models.Scene) (*string, error) { - if obj.Phash.Valid { - hexval := utils.PhashToString(obj.Phash.Int64) + f, err := r.getPrimaryFile(ctx, obj) + if err != nil { + return nil, err + } + + if f == nil { + return nil, nil + } + + val := f.Fingerprints.Get(file.FingerprintTypePhash) + if val == nil { + return nil, nil + } + + phash, _ := val.(int64) + + if phash != 0 { + hexval := utils.PhashToString(phash) return &hexval, nil } return nil, nil } -func (r *sceneResolver) CreatedAt(ctx context.Context, obj *models.Scene) (*time.Time, error) { - return &obj.CreatedAt.Timestamp, nil -} +func (r *sceneResolver) SceneStreams(ctx context.Context, obj *models.Scene) ([]*manager.SceneStreamEndpoint, error) { + // load the primary file into the scene + _, err := r.getPrimaryFile(ctx, obj) + if err != nil { + return nil, err + } -func (r *sceneResolver) UpdatedAt(ctx context.Context, obj *models.Scene) (*time.Time, error) { - return &obj.UpdatedAt.Timestamp, nil -} - -func (r *sceneResolver) FileModTime(ctx context.Context, obj *models.Scene) (*time.Time, error) { - return &obj.FileModTime.Timestamp, nil -} - -func (r *sceneResolver) SceneStreams(ctx context.Context, obj *models.Scene) ([]*models.SceneStreamEndpoint, error) { config := manager.GetInstance().Config baseURL, _ := ctx.Value(BaseURLCtxKey).(string) @@ -260,3 +360,27 @@ func (r *sceneResolver) SceneStreams(ctx context.Context, obj *models.Scene) ([] return manager.GetSceneStreamPaths(obj, builder.GetStreamURL(), config.GetMaxStreamingTranscodeSize()) } + +func (r *sceneResolver) Interactive(ctx 
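
The c := vv copy inside stashIDsSliceToPtrSlice is load-bearing: before Go 1.22, the range variable is a single reused variable, so taking &vv directly would leave every element of the result pointing at the last StashID. Reduced to essentials:

package main

import "fmt"

type StashID struct{ Endpoint, StashID string }

// ptrSlice mirrors stashIDsSliceToPtrSlice above.
func ptrSlice(v []StashID) []*StashID {
	ret := make([]*StashID, len(v))
	for i, vv := range v {
		c := vv // per-iteration copy; &vv would alias on Go <= 1.21
		ret[i] = &c
	}
	return ret
}

func main() {
	out := ptrSlice([]StashID{{"ep-1", "abc"}, {"ep-2", "def"}})
	fmt.Println(*out[0], *out[1]) // {ep-1 abc} {ep-2 def}
}
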
context.Context, obj *models.Scene) (bool, error) { + primaryFile, err := r.getPrimaryFile(ctx, obj) + if err != nil { + return false, err + } + if primaryFile == nil { + return false, nil + } + + return primaryFile.Interactive, nil +} + +func (r *sceneResolver) InteractiveSpeed(ctx context.Context, obj *models.Scene) (*int, error) { + primaryFile, err := r.getPrimaryFile(ctx, obj) + if err != nil { + return nil, err + } + if primaryFile == nil { + return nil, nil + } + + return primaryFile.InteractiveSpeed, nil +} diff --git a/internal/api/resolver_model_scene_marker.go b/internal/api/resolver_model_scene_marker.go index 64d418bd1..7a4d01be1 100644 --- a/internal/api/resolver_model_scene_marker.go +++ b/internal/api/resolver_model_scene_marker.go @@ -13,9 +13,9 @@ func (r *sceneMarkerResolver) Scene(ctx context.Context, obj *models.SceneMarker panic("Invalid scene id") } - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { + if err := r.withTxn(ctx, func(ctx context.Context) error { sceneID := int(obj.SceneID.Int64) - ret, err = repo.Scene().Find(sceneID) + ret, err = r.repository.Scene.Find(ctx, sceneID) return err }); err != nil { return nil, err @@ -25,8 +25,8 @@ func (r *sceneMarkerResolver) Scene(ctx context.Context, obj *models.SceneMarker } func (r *sceneMarkerResolver) PrimaryTag(ctx context.Context, obj *models.SceneMarker) (ret *models.Tag, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Tag().Find(obj.PrimaryTagID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Tag.Find(ctx, obj.PrimaryTagID) return err }); err != nil { return nil, err @@ -36,8 +36,8 @@ func (r *sceneMarkerResolver) PrimaryTag(ctx context.Context, obj *models.SceneM } func (r *sceneMarkerResolver) Tags(ctx context.Context, obj *models.SceneMarker) (ret []*models.Tag, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Tag().FindBySceneMarkerID(obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Tag.FindBySceneMarkerID(ctx, obj.ID) return err }); err != nil { return nil, err diff --git a/internal/api/resolver_model_studio.go b/internal/api/resolver_model_studio.go index d2b6b44c1..79ef8259e 100644 --- a/internal/api/resolver_model_studio.go +++ b/internal/api/resolver_model_studio.go @@ -4,6 +4,7 @@ import ( "context" "time" + "github.com/stashapp/stash/internal/api/loaders" "github.com/stashapp/stash/internal/api/urlbuilders" "github.com/stashapp/stash/pkg/gallery" "github.com/stashapp/stash/pkg/image" @@ -29,9 +30,9 @@ func (r *studioResolver) ImagePath(ctx context.Context, obj *models.Studio) (*st imagePath := urlbuilders.NewStudioURLBuilder(baseURL, obj).GetStudioImageURL() var hasImage bool - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { + if err := r.withTxn(ctx, func(ctx context.Context) error { var err error - hasImage, err = repo.Studio().HasImage(obj.ID) + hasImage, err = r.repository.Studio.HasImage(ctx, obj.ID) return err }); err != nil { return nil, err @@ -46,8 +47,8 @@ func (r *studioResolver) ImagePath(ctx context.Context, obj *models.Studio) (*st } func (r *studioResolver) Aliases(ctx context.Context, obj *models.Studio) (ret []string, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Studio().GetAliases(obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = 
r.repository.Studio.GetAliases(ctx, obj.ID) return err }); err != nil { return nil, err @@ -58,8 +59,8 @@ func (r *studioResolver) Aliases(ctx context.Context, obj *models.Studio) (ret [ func (r *studioResolver) SceneCount(ctx context.Context, obj *models.Studio) (ret *int, err error) { var res int - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - res, err = repo.Scene().CountByStudioID(obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + res, err = r.repository.Scene.CountByStudioID(ctx, obj.ID) return err }); err != nil { return nil, err @@ -70,8 +71,8 @@ func (r *studioResolver) SceneCount(ctx context.Context, obj *models.Studio) (re func (r *studioResolver) ImageCount(ctx context.Context, obj *models.Studio) (ret *int, err error) { var res int - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - res, err = image.CountByStudioID(repo.Image(), obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + res, err = image.CountByStudioID(ctx, r.repository.Image, obj.ID) return err }); err != nil { return nil, err @@ -82,8 +83,8 @@ func (r *studioResolver) ImageCount(ctx context.Context, obj *models.Studio) (re func (r *studioResolver) GalleryCount(ctx context.Context, obj *models.Studio) (ret *int, err error) { var res int - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - res, err = gallery.CountByStudioID(repo.Gallery(), obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + res, err = gallery.CountByStudioID(ctx, r.repository.Gallery, obj.ID) return err }); err != nil { return nil, err @@ -97,19 +98,12 @@ func (r *studioResolver) ParentStudio(ctx context.Context, obj *models.Studio) ( return nil, nil } - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Studio().Find(int(obj.ParentID.Int64)) - return err - }); err != nil { - return nil, err - } - - return ret, nil + return loaders.From(ctx).StudioByID.Load(int(obj.ParentID.Int64)) } func (r *studioResolver) ChildStudios(ctx context.Context, obj *models.Studio) (ret []*models.Studio, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Studio().FindChildren(obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Studio.FindChildren(ctx, obj.ID) return err }); err != nil { return nil, err @@ -118,15 +112,17 @@ func (r *studioResolver) ChildStudios(ctx context.Context, obj *models.Studio) ( return ret, nil } -func (r *studioResolver) StashIds(ctx context.Context, obj *models.Studio) (ret []*models.StashID, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Studio().GetStashIDs(obj.ID) +func (r *studioResolver) StashIds(ctx context.Context, obj *models.Studio) ([]*models.StashID, error) { + var ret []models.StashID + if err := r.withTxn(ctx, func(ctx context.Context) error { + var err error + ret, err = r.repository.Studio.GetStashIDs(ctx, obj.ID) return err }); err != nil { return nil, err } - return ret, nil + return stashIDsSliceToPtrSlice(ret), nil } func (r *studioResolver) Rating(ctx context.Context, obj *models.Studio) (*int, error) { @@ -153,8 +149,8 @@ func (r *studioResolver) UpdatedAt(ctx context.Context, obj *models.Studio) (*ti } func (r *studioResolver) Movies(ctx context.Context, obj *models.Studio) (ret []*models.Movie, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = 
repo.Movie().FindByStudioID(obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Movie.FindByStudioID(ctx, obj.ID) return err }); err != nil { return nil, err @@ -165,8 +161,8 @@ func (r *studioResolver) Movies(ctx context.Context, obj *models.Studio) (ret [] func (r *studioResolver) MovieCount(ctx context.Context, obj *models.Studio) (ret *int, err error) { var res int - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - res, err = repo.Movie().CountByStudioID(obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + res, err = r.repository.Movie.CountByStudioID(ctx, obj.ID) return err }); err != nil { return nil, err diff --git a/internal/api/resolver_model_tag.go b/internal/api/resolver_model_tag.go index cb406e5fc..3592dd959 100644 --- a/internal/api/resolver_model_tag.go +++ b/internal/api/resolver_model_tag.go @@ -11,8 +11,8 @@ import ( ) func (r *tagResolver) Parents(ctx context.Context, obj *models.Tag) (ret []*models.Tag, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Tag().FindByChildTagID(obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Tag.FindByChildTagID(ctx, obj.ID) return err }); err != nil { return nil, err @@ -22,8 +22,8 @@ func (r *tagResolver) Parents(ctx context.Context, obj *models.Tag) (ret []*mode } func (r *tagResolver) Children(ctx context.Context, obj *models.Tag) (ret []*models.Tag, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Tag().FindByParentTagID(obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Tag.FindByParentTagID(ctx, obj.ID) return err }); err != nil { return nil, err @@ -33,8 +33,8 @@ func (r *tagResolver) Children(ctx context.Context, obj *models.Tag) (ret []*mod } func (r *tagResolver) Aliases(ctx context.Context, obj *models.Tag) (ret []string, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Tag().GetAliases(obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Tag.GetAliases(ctx, obj.ID) return err }); err != nil { return nil, err @@ -45,8 +45,8 @@ func (r *tagResolver) Aliases(ctx context.Context, obj *models.Tag) (ret []strin func (r *tagResolver) SceneCount(ctx context.Context, obj *models.Tag) (ret *int, err error) { var count int - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - count, err = repo.Scene().CountByTagID(obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + count, err = r.repository.Scene.CountByTagID(ctx, obj.ID) return err }); err != nil { return nil, err @@ -57,8 +57,8 @@ func (r *tagResolver) SceneCount(ctx context.Context, obj *models.Tag) (ret *int func (r *tagResolver) SceneMarkerCount(ctx context.Context, obj *models.Tag) (ret *int, err error) { var count int - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - count, err = repo.SceneMarker().CountByTagID(obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + count, err = r.repository.SceneMarker.CountByTagID(ctx, obj.ID) return err }); err != nil { return nil, err @@ -69,8 +69,8 @@ func (r *tagResolver) SceneMarkerCount(ctx context.Context, obj *models.Tag) (re func (r *tagResolver) ImageCount(ctx context.Context, obj *models.Tag) (ret *int, err error) { var res int - if err := r.withReadTxn(ctx, func(repo 
models.ReaderRepository) error { - res, err = image.CountByTagID(repo.Image(), obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + res, err = image.CountByTagID(ctx, r.repository.Image, obj.ID) return err }); err != nil { return nil, err @@ -81,8 +81,8 @@ func (r *tagResolver) ImageCount(ctx context.Context, obj *models.Tag) (ret *int func (r *tagResolver) GalleryCount(ctx context.Context, obj *models.Tag) (ret *int, err error) { var res int - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - res, err = gallery.CountByTagID(repo.Gallery(), obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + res, err = gallery.CountByTagID(ctx, r.repository.Gallery, obj.ID) return err }); err != nil { return nil, err @@ -93,8 +93,8 @@ func (r *tagResolver) GalleryCount(ctx context.Context, obj *models.Tag) (ret *i func (r *tagResolver) PerformerCount(ctx context.Context, obj *models.Tag) (ret *int, err error) { var count int - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - count, err = repo.Performer().CountByTagID(obj.ID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + count, err = r.repository.Performer.CountByTagID(ctx, obj.ID) return err }); err != nil { return nil, err diff --git a/internal/api/resolver_mutation_configure.go b/internal/api/resolver_mutation_configure.go index 7413c413b..f1e35239d 100644 --- a/internal/api/resolver_mutation_configure.go +++ b/internal/api/resolver_mutation_configure.go @@ -15,21 +15,21 @@ import ( var ErrOverriddenConfig = errors.New("cannot set overridden value") -func (r *mutationResolver) Setup(ctx context.Context, input models.SetupInput) (bool, error) { +func (r *mutationResolver) Setup(ctx context.Context, input manager.SetupInput) (bool, error) { err := manager.GetInstance().Setup(ctx, input) return err == nil, err } -func (r *mutationResolver) Migrate(ctx context.Context, input models.MigrateInput) (bool, error) { +func (r *mutationResolver) Migrate(ctx context.Context, input manager.MigrateInput) (bool, error) { err := manager.GetInstance().Migrate(ctx, input) return err == nil, err } -func (r *mutationResolver) ConfigureGeneral(ctx context.Context, input models.ConfigGeneralInput) (*models.ConfigGeneralResult, error) { +func (r *mutationResolver) ConfigureGeneral(ctx context.Context, input ConfigGeneralInput) (*ConfigGeneralResult, error) { c := config.GetInstance() existingPaths := c.GetStashPaths() - if len(input.Stashes) > 0 { + if input.Stashes != nil { for _, s := range input.Stashes { // Only validate existence of new paths isNew := true @@ -132,7 +132,9 @@ func (r *mutationResolver) ConfigureGeneral(ctx context.Context, input models.Co } // validate changing VideoFileNamingAlgorithm - if err := manager.ValidateVideoFileNamingAlgorithm(r.txnManager, *input.VideoFileNamingAlgorithm); err != nil { + if err := r.withTxn(context.TODO(), func(ctx context.Context) error { + return manager.ValidateVideoFileNamingAlgorithm(ctx, r.repository.Scene, *input.VideoFileNamingAlgorithm) + }); err != nil { return makeConfigGeneralResult(), err } @@ -281,7 +283,7 @@ func (r *mutationResolver) ConfigureGeneral(ctx context.Context, input models.Co return makeConfigGeneralResult(), nil } -func (r *mutationResolver) ConfigureInterface(ctx context.Context, input models.ConfigInterfaceInput) (*models.ConfigInterfaceResult, error) { +func (r *mutationResolver) ConfigureInterface(ctx context.Context, input ConfigInterfaceInput) (*ConfigInterfaceResult, error) { c := 
config.GetInstance() setBool := func(key string, v *bool) { @@ -338,10 +340,10 @@ func (r *mutationResolver) ConfigureInterface(ctx context.Context, input models. c.Set(config.ImageLightboxSlideshowDelay, *options.SlideshowDelay) } - setString(config.ImageLightboxDisplayMode, (*string)(options.DisplayMode)) + setString(config.ImageLightboxDisplayModeKey, (*string)(options.DisplayMode)) setBool(config.ImageLightboxScaleUp, options.ScaleUp) setBool(config.ImageLightboxResetZoomOnNav, options.ResetZoomOnNav) - setString(config.ImageLightboxScrollMode, (*string)(options.ScrollMode)) + setString(config.ImageLightboxScrollModeKey, (*string)(options.ScrollMode)) if options.ScrollAttemptsBeforeChange != nil { c.Set(config.ImageLightboxScrollAttemptsBeforeChange, *options.ScrollAttemptsBeforeChange) @@ -376,7 +378,7 @@ func (r *mutationResolver) ConfigureInterface(ctx context.Context, input models. return makeConfigInterfaceResult(), nil } -func (r *mutationResolver) ConfigureDlna(ctx context.Context, input models.ConfigDLNAInput) (*models.ConfigDLNAResult, error) { +func (r *mutationResolver) ConfigureDlna(ctx context.Context, input ConfigDLNAInput) (*ConfigDLNAResult, error) { c := config.GetInstance() if input.ServerName != nil { @@ -413,7 +415,7 @@ func (r *mutationResolver) ConfigureDlna(ctx context.Context, input models.Confi return makeConfigDLNAResult(), nil } -func (r *mutationResolver) ConfigureScraping(ctx context.Context, input models.ConfigScrapingInput) (*models.ConfigScrapingResult, error) { +func (r *mutationResolver) ConfigureScraping(ctx context.Context, input ConfigScrapingInput) (*ConfigScrapingResult, error) { c := config.GetInstance() refreshScraperCache := false @@ -445,7 +447,7 @@ func (r *mutationResolver) ConfigureScraping(ctx context.Context, input models.C return makeConfigScrapingResult(), nil } -func (r *mutationResolver) ConfigureDefaults(ctx context.Context, input models.ConfigDefaultSettingsInput) (*models.ConfigDefaultSettingsResult, error) { +func (r *mutationResolver) ConfigureDefaults(ctx context.Context, input ConfigDefaultSettingsInput) (*ConfigDefaultSettingsResult, error) { c := config.GetInstance() if input.Identify != nil { @@ -453,7 +455,7 @@ func (r *mutationResolver) ConfigureDefaults(ctx context.Context, input models.C } if input.Scan != nil { - c.Set(config.DefaultScanSettings, input.Scan) + c.Set(config.DefaultScanSettings, input.Scan.ScanMetadataOptions) } if input.AutoTag != nil { @@ -479,7 +481,7 @@ func (r *mutationResolver) ConfigureDefaults(ctx context.Context, input models.C return makeConfigDefaultsResult(), nil } -func (r *mutationResolver) GenerateAPIKey(ctx context.Context, input models.GenerateAPIKeyInput) (string, error) { +func (r *mutationResolver) GenerateAPIKey(ctx context.Context, input GenerateAPIKeyInput) (string, error) { c := config.GetInstance() var newAPIKey string diff --git a/internal/api/resolver_mutation_dlna.go b/internal/api/resolver_mutation_dlna.go index 6f43a9e6f..cb62afac9 100644 --- a/internal/api/resolver_mutation_dlna.go +++ b/internal/api/resolver_mutation_dlna.go @@ -5,10 +5,9 @@ import ( "time" "github.com/stashapp/stash/internal/manager" - "github.com/stashapp/stash/pkg/models" ) -func (r *mutationResolver) EnableDlna(ctx context.Context, input models.EnableDLNAInput) (bool, error) { +func (r *mutationResolver) EnableDlna(ctx context.Context, input EnableDLNAInput) (bool, error) { err := manager.GetInstance().DLNAService.Start(parseMinutes(input.Duration)) if err != nil { return false, err @@ -16,17 +15,17 @@ 
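
The setBool/setString closures in ConfigureInterface are the bridge between GraphQL optional inputs and partial updates: a nil pointer means the client omitted the field, so the stored value is left untouched. Distilled (the store and key names here are illustrative, not the real config keys):

package main

import "fmt"

func main() {
	store := map[string]interface{}{"noBrowser": false}

	setBool := func(key string, v *bool) {
		if v != nil { // nil pointer == field absent from the input
			store[key] = *v
		}
	}

	var omitted *bool // client did not send this field
	sent := true      // client sent this one

	setBool("noBrowser", omitted)
	setBool("showScrubber", &sent)

	fmt.Println(store) // map[noBrowser:false showScrubber:true]
}
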
func (r *mutationResolver) EnableDlna(ctx context.Context, input models.EnableDL return true, nil } -func (r *mutationResolver) DisableDlna(ctx context.Context, input models.DisableDLNAInput) (bool, error) { +func (r *mutationResolver) DisableDlna(ctx context.Context, input DisableDLNAInput) (bool, error) { manager.GetInstance().DLNAService.Stop(parseMinutes(input.Duration)) return true, nil } -func (r *mutationResolver) AddTempDlnaip(ctx context.Context, input models.AddTempDLNAIPInput) (bool, error) { +func (r *mutationResolver) AddTempDlnaip(ctx context.Context, input AddTempDLNAIPInput) (bool, error) { manager.GetInstance().DLNAService.AddTempDLNAIP(input.Address, parseMinutes(input.Duration)) return true, nil } -func (r *mutationResolver) RemoveTempDlnaip(ctx context.Context, input models.RemoveTempDLNAIPInput) (bool, error) { +func (r *mutationResolver) RemoveTempDlnaip(ctx context.Context, input RemoveTempDLNAIPInput) (bool, error) { ret := manager.GetInstance().DLNAService.RemoveTempDLNAIP(input.Address) return ret, nil } diff --git a/internal/api/resolver_mutation_gallery.go b/internal/api/resolver_mutation_gallery.go index cd04a0313..aa567734d 100644 --- a/internal/api/resolver_mutation_gallery.go +++ b/internal/api/resolver_mutation_gallery.go @@ -2,7 +2,6 @@ package api import ( "context" - "database/sql" "errors" "fmt" "os" @@ -11,7 +10,6 @@ import ( "github.com/stashapp/stash/internal/manager" "github.com/stashapp/stash/pkg/file" - "github.com/stashapp/stash/pkg/hash/md5" "github.com/stashapp/stash/pkg/image" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/plugin" @@ -21,8 +19,8 @@ import ( ) func (r *mutationResolver) getGallery(ctx context.Context, id int) (ret *models.Gallery, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Gallery().Find(id) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Gallery.Find(ctx, id) return err }); err != nil { return nil, err @@ -31,75 +29,57 @@ func (r *mutationResolver) getGallery(ctx context.Context, id int) (ret *models. 
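
parseMinutes, called by all four DLNA mutations, is not shown in this diff. Judging from the call sites, it maps an optional minute count onto the optional duration the DLNA service expects, presumably something close to:

package api

import "time"

// Sketch inferred from the call sites above; the real helper may differ.
func parseMinutes(minutes *int) *time.Duration {
	if minutes == nil {
		return nil // no duration given: apply indefinitely
	}

	d := time.Duration(*minutes) * time.Minute
	return &d
}
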
diff --git a/internal/api/resolver_mutation_gallery.go b/internal/api/resolver_mutation_gallery.go
index cd04a0313..aa567734d 100644
--- a/internal/api/resolver_mutation_gallery.go
+++ b/internal/api/resolver_mutation_gallery.go
@@ -2,7 +2,6 @@ package api

 import (
 	"context"
-	"database/sql"
 	"errors"
 	"fmt"
 	"os"
@@ -11,7 +10,6 @@ import (

 	"github.com/stashapp/stash/internal/manager"
 	"github.com/stashapp/stash/pkg/file"
-	"github.com/stashapp/stash/pkg/hash/md5"
 	"github.com/stashapp/stash/pkg/image"
 	"github.com/stashapp/stash/pkg/models"
 	"github.com/stashapp/stash/pkg/plugin"
@@ -21,8 +19,8 @@ import (
 )

 func (r *mutationResolver) getGallery(ctx context.Context, id int) (ret *models.Gallery, err error) {
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
-		ret, err = repo.Gallery().Find(id)
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		ret, err = r.repository.Gallery.Find(ctx, id)
 		return err
 	}); err != nil {
 		return nil, err
@@ -31,75 +29,57 @@ func (r *mutationResolver) getGallery(ctx context.Context, id int) (ret *models.
 	return ret, nil
 }

-func (r *mutationResolver) GalleryCreate(ctx context.Context, input models.GalleryCreateInput) (*models.Gallery, error) {
+func (r *mutationResolver) GalleryCreate(ctx context.Context, input GalleryCreateInput) (*models.Gallery, error) {
 	// name must be provided
 	if input.Title == "" {
 		return nil, errors.New("title must not be empty")
 	}

-	// for manually created galleries, generate checksum from title
-	checksum := md5.FromString(input.Title)
-
 	// Populate a new performer from the input
+	performerIDs, err := stringslice.StringSliceToIntSlice(input.PerformerIds)
+	if err != nil {
+		return nil, fmt.Errorf("converting performer ids: %w", err)
+	}
+
+	tagIDs, err := stringslice.StringSliceToIntSlice(input.TagIds)
+	if err != nil {
+		return nil, fmt.Errorf("converting tag ids: %w", err)
+	}
+
+	sceneIDs, err := stringslice.StringSliceToIntSlice(input.SceneIds)
+	if err != nil {
+		return nil, fmt.Errorf("converting scene ids: %w", err)
+	}
+
 	currentTime := time.Now()
 	newGallery := models.Gallery{
-		Title: sql.NullString{
-			String: input.Title,
-			Valid:  true,
-		},
-		Checksum:  checksum,
-		CreatedAt: models.SQLiteTimestamp{Timestamp: currentTime},
-		UpdatedAt: models.SQLiteTimestamp{Timestamp: currentTime},
+		Title:        input.Title,
+		PerformerIDs: models.NewRelatedIDs(performerIDs),
+		TagIDs:       models.NewRelatedIDs(tagIDs),
+		SceneIDs:     models.NewRelatedIDs(sceneIDs),
+		CreatedAt:    currentTime,
+		UpdatedAt:    currentTime,
 	}

 	if input.URL != nil {
-		newGallery.URL = sql.NullString{String: *input.URL, Valid: true}
+		newGallery.URL = *input.URL
 	}
 	if input.Details != nil {
-		newGallery.Details = sql.NullString{String: *input.Details, Valid: true}
-	}
-	if input.URL != nil {
-		newGallery.URL = sql.NullString{String: *input.URL, Valid: true}
-	}
-	if input.Date != nil {
-		newGallery.Date = models.SQLiteDate{String: *input.Date, Valid: true}
-	}
-	if input.Rating != nil {
-		newGallery.Rating = sql.NullInt64{Int64: int64(*input.Rating), Valid: true}
-	} else {
-		// rating must be nullable
-		newGallery.Rating = sql.NullInt64{Valid: false}
+		newGallery.Details = *input.Details
 	}
+	if input.Date != nil {
+		d := models.NewDate(*input.Date)
+		newGallery.Date = &d
+	}
+	newGallery.Rating = input.Rating
+
 	if input.StudioID != nil {
-		studioID, _ := strconv.ParseInt(*input.StudioID, 10, 64)
-		newGallery.StudioID = sql.NullInt64{Int64: studioID, Valid: true}
-	} else {
-		// studio must be nullable
-		newGallery.StudioID = sql.NullInt64{Valid: false}
+		studioID, _ := strconv.Atoi(*input.StudioID)
+		newGallery.StudioID = &studioID
 	}

 	// Start the transaction and save the gallery
-	var gallery *models.Gallery
-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		qb := repo.Gallery()
-		var err error
-		gallery, err = qb.Create(newGallery)
-		if err != nil {
-			return err
-		}
-
-		// Save the performers
-		if err := r.updateGalleryPerformers(qb, gallery.ID, input.PerformerIds); err != nil {
-			return err
-		}
-
-		// Save the tags
-		if err := r.updateGalleryTags(qb, gallery.ID, input.TagIds); err != nil {
-			return err
-		}
-
-		// Save the scenes
-		if err := r.updateGalleryScenes(qb, gallery.ID, input.SceneIds); err != nil {
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.Gallery
+		if err := qb.Create(ctx, &newGallery, nil); err != nil {
 			return err
 		}

@@ -108,32 +88,12 @@ func (r *mutationResolver) GalleryCreate(ctx context.Context, input models.Galle
 		return nil, err
 	}

-	r.hookExecutor.ExecutePostHooks(ctx, gallery.ID, plugin.GalleryCreatePost, input, nil)
-	return r.getGallery(ctx, gallery.ID)
+	r.hookExecutor.ExecutePostHooks(ctx, newGallery.ID, plugin.GalleryCreatePost, input, nil)
+	return r.getGallery(ctx, newGallery.ID)
 }

-func (r *mutationResolver) updateGalleryPerformers(qb models.GalleryReaderWriter, galleryID int, performerIDs []string) error {
-	ids, err := stringslice.StringSliceToIntSlice(performerIDs)
-	if err != nil {
-		return err
-	}
-	return qb.UpdatePerformers(galleryID, ids)
-}
-
-func (r *mutationResolver) updateGalleryTags(qb models.GalleryReaderWriter, galleryID int, tagIDs []string) error {
-	ids, err := stringslice.StringSliceToIntSlice(tagIDs)
-	if err != nil {
-		return err
-	}
-	return qb.UpdateTags(galleryID, ids)
-}
-
-func (r *mutationResolver) updateGalleryScenes(qb models.GalleryReaderWriter, galleryID int, sceneIDs []string) error {
-	ids, err := stringslice.StringSliceToIntSlice(sceneIDs)
-	if err != nil {
-		return err
-	}
-	return qb.UpdateScenes(galleryID, ids)
+type GallerySceneUpdater interface {
+	UpdateScenes(ctx context.Context, galleryID int, sceneIDs []int) error
 }

 func (r *mutationResolver) GalleryUpdate(ctx context.Context, input models.GalleryUpdateInput) (ret *models.Gallery, err error) {
@@ -142,8 +102,8 @@ func (r *mutationResolver) GalleryUpdate(ctx context.Context, input models.Galle
 	}

 	// Start the transaction and save the gallery
-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		ret, err = r.galleryUpdate(input, translator, repo)
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		ret, err = r.galleryUpdate(ctx, input, translator)
 		return err
 	}); err != nil {
 		return nil, err
@@ -158,13 +118,13 @@ func (r *mutationResolver) GalleriesUpdate(ctx context.Context, input []*models.
 	inputMaps := getUpdateInputMaps(ctx)

 	// Start the transaction and save the gallery
-	if err := r.withTxn(ctx, func(repo models.Repository) error {
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
 		for i, gallery := range input {
 			translator := changesetTranslator{
 				inputMap: inputMaps[i],
 			}

-			thisGallery, err := r.galleryUpdate(*gallery, translator, repo)
+			thisGallery, err := r.galleryUpdate(ctx, *gallery, translator)
 			if err != nil {
 				return err
 			}
@@ -196,8 +156,8 @@ func (r *mutationResolver) GalleriesUpdate(ctx context.Context, input []*models.
 	return newRet, nil
 }

-func (r *mutationResolver) galleryUpdate(input models.GalleryUpdateInput, translator changesetTranslator, repo models.Repository) (*models.Gallery, error) {
-	qb := repo.Gallery()
+func (r *mutationResolver) galleryUpdate(ctx context.Context, input models.GalleryUpdateInput, translator changesetTranslator) (*models.Gallery, error) {
+	qb := r.repository.Gallery

 	// Populate gallery from the input
 	galleryID, err := strconv.Atoi(input.ID)
@@ -205,7 +165,7 @@ func (r *mutationResolver) galleryUpdate(input models.GalleryUpdateInput, transl
 		return nil, err
 	}

-	originalGallery, err := qb.Find(galleryID)
+	originalGallery, err := qb.Find(ctx, galleryID)
 	if err != nil {
 		return nil, err
 	}
@@ -214,11 +174,7 @@ func (r *mutationResolver) galleryUpdate(input models.GalleryUpdateInput, transl
 		return nil, errors.New("not found")
 	}

-	updatedTime := time.Now()
-	updatedGallery := models.GalleryPartial{
-		ID:        galleryID,
-		UpdatedAt: &models.SQLiteTimestamp{Timestamp: updatedTime},
-	}
+	updatedGallery := models.NewGalleryPartial()

 	if input.Title != nil {
 		// ensure title is not empty
@@ -226,124 +182,106 @@ func (r *mutationResolver) galleryUpdate(input models.GalleryUpdateInput, transl
 			return nil, errors.New("title must not be empty")
 		}

-		// if gallery is not zip-based, then generate the checksum from the title
-		if !originalGallery.Path.Valid {
-			checksum := md5.FromString(*input.Title)
-			updatedGallery.Checksum = &checksum
-		}
-
-		updatedGallery.Title = &sql.NullString{String: *input.Title, Valid: true}
+		updatedGallery.Title = models.NewOptionalString(*input.Title)
 	}

-	updatedGallery.Details = translator.nullString(input.Details, "details")
-	updatedGallery.URL = translator.nullString(input.URL, "url")
-	updatedGallery.Date = translator.sqliteDate(input.Date, "date")
-	updatedGallery.Rating = translator.nullInt64(input.Rating, "rating")
-	updatedGallery.StudioID = translator.nullInt64FromString(input.StudioID, "studio_id")
-	updatedGallery.Organized = input.Organized
+	updatedGallery.Details = translator.optionalString(input.Details, "details")
+	updatedGallery.URL = translator.optionalString(input.URL, "url")
+	updatedGallery.Date = translator.optionalDate(input.Date, "date")
+	updatedGallery.Rating = translator.optionalInt(input.Rating, "rating")
+	updatedGallery.StudioID, err = translator.optionalIntFromString(input.StudioID, "studio_id")
+	if err != nil {
+		return nil, fmt.Errorf("converting studio id: %w", err)
+	}
+	updatedGallery.Organized = translator.optionalBool(input.Organized, "organized")
+
+	if translator.hasField("performer_ids") {
+		updatedGallery.PerformerIDs, err = translateUpdateIDs(input.PerformerIds, models.RelationshipUpdateModeSet)
+		if err != nil {
+			return nil, fmt.Errorf("converting performer ids: %w", err)
+		}
+	}
+
+	if translator.hasField("tag_ids") {
+		updatedGallery.TagIDs, err = translateUpdateIDs(input.TagIds, models.RelationshipUpdateModeSet)
+		if err != nil {
+			return nil, fmt.Errorf("converting tag ids: %w", err)
+		}
+	}
+
+	if translator.hasField("scene_ids") {
+		updatedGallery.SceneIDs, err = translateUpdateIDs(input.SceneIds, models.RelationshipUpdateModeSet)
+		if err != nil {
+			return nil, fmt.Errorf("converting scene ids: %w", err)
+		}
+	}

 	// gallery scene is set from the scene only
-	gallery, err := qb.UpdatePartial(updatedGallery)
+	gallery, err := qb.UpdatePartial(ctx, galleryID, updatedGallery)
 	if err != nil {
 		return nil, err
 	}

-	// Save the performers
-	if translator.hasField("performer_ids") {
-		if err := r.updateGalleryPerformers(qb, galleryID, input.PerformerIds); err != nil {
-			return nil, err
-		}
-	}
-
-	// Save the tags
-	if translator.hasField("tag_ids") {
-		if err := r.updateGalleryTags(qb, galleryID, input.TagIds); err != nil {
-			return nil, err
-		}
-	}
-
-	// Save the scenes
-	if translator.hasField("scene_ids") {
-		if err := r.updateGalleryScenes(qb, galleryID, input.SceneIds); err != nil {
-			return nil, err
-		}
-	}
-
 	return gallery, nil
 }

-func (r *mutationResolver) BulkGalleryUpdate(ctx context.Context, input models.BulkGalleryUpdateInput) ([]*models.Gallery, error) {
+func (r *mutationResolver) BulkGalleryUpdate(ctx context.Context, input BulkGalleryUpdateInput) ([]*models.Gallery, error) {
 	// Populate gallery from the input
-	updatedTime := time.Now()
-
 	translator := changesetTranslator{
 		inputMap: getUpdateInputMap(ctx),
 	}

-	updatedGallery := models.GalleryPartial{
-		UpdatedAt: &models.SQLiteTimestamp{Timestamp: updatedTime},
+	updatedGallery := models.NewGalleryPartial()
+
+	updatedGallery.Details = translator.optionalString(input.Details, "details")
+	updatedGallery.URL = translator.optionalString(input.URL, "url")
+	updatedGallery.Date = translator.optionalDate(input.Date, "date")
+	updatedGallery.Rating = translator.optionalInt(input.Rating, "rating")
+
+	var err error
+	updatedGallery.StudioID, err = translator.optionalIntFromString(input.StudioID, "studio_id")
+	if err != nil {
+		return nil, fmt.Errorf("converting studio id: %w", err)
+	}
+	updatedGallery.Organized = translator.optionalBool(input.Organized, "organized")
+
+	if translator.hasField("performer_ids") {
+		updatedGallery.PerformerIDs, err = translateUpdateIDs(input.PerformerIds.Ids, input.PerformerIds.Mode)
+		if err != nil {
+			return nil, fmt.Errorf("converting performer ids: %w", err)
+		}
 	}

-	updatedGallery.Details = translator.nullString(input.Details, "details")
-	updatedGallery.URL = translator.nullString(input.URL, "url")
-	updatedGallery.Date = translator.sqliteDate(input.Date, "date")
-	updatedGallery.Rating = translator.nullInt64(input.Rating, "rating")
-	updatedGallery.StudioID = translator.nullInt64FromString(input.StudioID, "studio_id")
-	updatedGallery.Organized = input.Organized
+	if translator.hasField("tag_ids") {
+		updatedGallery.TagIDs, err = translateUpdateIDs(input.TagIds.Ids, input.TagIds.Mode)
+		if err != nil {
+			return nil, fmt.Errorf("converting tag ids: %w", err)
+		}
+	}
+
+	if translator.hasField("scene_ids") {
+		updatedGallery.SceneIDs, err = translateUpdateIDs(input.SceneIds.Ids, input.SceneIds.Mode)
+		if err != nil {
+			return nil, fmt.Errorf("converting scene ids: %w", err)
+		}
+	}

 	ret := []*models.Gallery{}

 	// Start the transaction and save the galleries
-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		qb := repo.Gallery()
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.Gallery

 		for _, galleryIDStr := range input.Ids {
 			galleryID, _ := strconv.Atoi(galleryIDStr)
-			updatedGallery.ID = galleryID

-			gallery, err := qb.UpdatePartial(updatedGallery)
+			gallery, err := qb.UpdatePartial(ctx, galleryID, updatedGallery)
 			if err != nil {
 				return err
 			}

 			ret = append(ret, gallery)
-
-			// Save the performers
-			if translator.hasField("performer_ids") {
-				performerIDs, err := adjustGalleryPerformerIDs(qb, galleryID, *input.PerformerIds)
-				if err != nil {
-					return err
-				}
-
-				if err := qb.UpdatePerformers(galleryID, performerIDs); err != nil {
-					return err
-				}
-			}
-
-			// Save the tags
-			if translator.hasField("tag_ids") {
-				tagIDs, err := adjustGalleryTagIDs(qb, galleryID, *input.TagIds)
-				if err != nil {
-					return err
-				}
-
-				if err := qb.UpdateTags(galleryID, tagIDs); err != nil {
-					return err
-				}
-			}
-
-			// Save the scenes
-			if translator.hasField("scene_ids") {
-				sceneIDs, err := adjustGallerySceneIDs(qb, galleryID, *input.SceneIds)
-				if err != nil {
-					return err
-				}
-
-				if err := qb.UpdateScenes(galleryID, sceneIDs); err != nil {
-					return err
-				}
-			}
 		}

 		return nil
@@ -367,31 +305,8 @@ func (r *mutationResolver) BulkGalleryUpdate(ctx context.Context, input models.B
 	return newRet, nil
 }

-func adjustGalleryPerformerIDs(qb models.GalleryReader, galleryID int, ids models.BulkUpdateIds) (ret []int, err error) {
-	ret, err = qb.GetPerformerIDs(galleryID)
-	if err != nil {
-		return nil, err
-	}
-
-	return adjustIDs(ret, ids), nil
-}
-
-func adjustGalleryTagIDs(qb models.GalleryReader, galleryID int, ids models.BulkUpdateIds) (ret []int, err error) {
-	ret, err = qb.GetTagIDs(galleryID)
-	if err != nil {
-		return nil, err
-	}
-
-	return adjustIDs(ret, ids), nil
-}
-
-func adjustGallerySceneIDs(qb models.GalleryReader, galleryID int, ids models.BulkUpdateIds) (ret []int, err error) {
-	ret, err = qb.GetSceneIDs(galleryID)
-	if err != nil {
-		return nil, err
-	}
-
-	return adjustIDs(ret, ids), nil
+type GallerySceneGetter interface {
+	GetSceneIDs(ctx context.Context, galleryID int) ([]int, error)
 }

 func (r *mutationResolver) GalleryDestroy(ctx context.Context, input models.GalleryDestroyInput) (bool, error) {
@@ -403,19 +318,18 @@ func (r *mutationResolver) GalleryDestroy(ctx context.Context, input models.Gall
 	var galleries []*models.Gallery
 	var imgsDestroyed []*models.Image
 	fileDeleter := &image.FileDeleter{
-		Deleter: *file.NewDeleter(),
+		Deleter: file.NewDeleter(),
 		Paths:   manager.GetInstance().Paths,
 	}

 	deleteGenerated := utils.IsTrue(input.DeleteGenerated)
 	deleteFile := utils.IsTrue(input.DeleteFile)

-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		qb := repo.Gallery()
-		iqb := repo.Image()
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.Gallery

 		for _, id := range galleryIDs {
-			gallery, err := qb.Find(id)
+			gallery, err := qb.Find(ctx, id)
 			if err != nil {
 				return err
 			}
@@ -424,55 +338,14 @@ func (r *mutationResolver) GalleryDestroy(ctx context.Context, input models.Gall
 				return fmt.Errorf("gallery with id %d not found", id)
 			}

-			galleries = append(galleries, gallery)
-
-			// if this is a zip-based gallery, delete the images as well first
-			if gallery.Zip {
-				imgs, err := iqb.FindByGalleryID(id)
-				if err != nil {
-					return err
-				}
-
-				for _, img := range imgs {
-					if err := image.Destroy(img, iqb, fileDeleter, deleteGenerated, false); err != nil {
-						return err
-					}
-
-					imgsDestroyed = append(imgsDestroyed, img)
-				}
-
-				if deleteFile {
-					if err := fileDeleter.Files([]string{gallery.Path.String}); err != nil {
-						return err
-					}
-				}
-			} else if deleteFile {
-				// Delete image if it is only attached to this gallery
-				imgs, err := iqb.FindByGalleryID(id)
-				if err != nil {
-					return err
-				}
-
-				for _, img := range imgs {
-					imgGalleries, err := qb.FindByImageID(img.ID)
-					if err != nil {
-						return err
-					}
-
-					if len(imgGalleries) == 1 {
-						if err := image.Destroy(img, iqb, fileDeleter, deleteGenerated, deleteFile); err != nil {
-							return err
-						}
-
-						imgsDestroyed = append(imgsDestroyed, img)
-					}
-				}
-
-				// we only want to delete a folder-based gallery if it is empty.
-				// don't do this with the file deleter
+			if err := gallery.LoadFiles(ctx, qb); err != nil {
+				return fmt.Errorf("loading files for gallery %d", id)
 			}

-			if err := qb.Destroy(id); err != nil {
+			galleries = append(galleries, gallery)
+
+			imgsDestroyed, err = r.galleryService.Destroy(ctx, gallery, fileDeleter, deleteGenerated, deleteFile)
+			if err != nil {
 				return err
 			}
 		}
@@ -488,10 +361,11 @@ func (r *mutationResolver) GalleryDestroy(ctx context.Context, input models.Gall

 	for _, gallery := range galleries {
 		// don't delete stash library paths
-		if utils.IsTrue(input.DeleteFile) && !gallery.Zip && gallery.Path.Valid && !isStashPath(gallery.Path.String) {
+		path := gallery.Path
+		if deleteFile && path != "" && !isStashPath(path) {
 			// try to remove the folder - it is possible that it is not empty
 			// so swallow the error if present
-			_ = os.Remove(gallery.Path.String)
+			_ = os.Remove(path)
 		}
 	}

@@ -499,8 +373,8 @@ func (r *mutationResolver) GalleryDestroy(ctx context.Context, input models.Gall
 	for _, gallery := range galleries {
 		r.hookExecutor.ExecutePostHooks(ctx, gallery.ID, plugin.GalleryDestroyPost, plugin.GalleryDestroyInput{
 			GalleryDestroyInput: input,
-			Checksum:            gallery.Checksum,
-			Path:                gallery.Path.String,
+			Checksum:            gallery.Checksum(),
+			Path:                gallery.Path,
 		}, nil)
 	}

@@ -526,7 +400,7 @@ func isStashPath(path string) bool {
 	return false
 }

-func (r *mutationResolver) AddGalleryImages(ctx context.Context, input models.GalleryAddInput) (bool, error) {
+func (r *mutationResolver) AddGalleryImages(ctx context.Context, input GalleryAddInput) (bool, error) {
 	galleryID, err := strconv.Atoi(input.GalleryID)
 	if err != nil {
 		return false, err
@@ -537,9 +411,9 @@ func (r *mutationResolver) AddGalleryImages(ctx context.Context, input models.Ga
 		return false, err
 	}

-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		qb := repo.Gallery()
-		gallery, err := qb.Find(galleryID)
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.Gallery
+		gallery, err := qb.Find(ctx, galleryID)
 		if err != nil {
 			return err
 		}
@@ -548,17 +422,13 @@ func (r *mutationResolver) AddGalleryImages(ctx context.Context, input models.Ga
 			return errors.New("gallery not found")
 		}

-		if gallery.Zip {
-			return errors.New("cannot modify zip gallery images")
-		}
-
-		newIDs, err := qb.GetImageIDs(galleryID)
+		newIDs, err := qb.GetImageIDs(ctx, galleryID)
 		if err != nil {
 			return err
 		}

 		newIDs = intslice.IntAppendUniques(newIDs, imageIDs)
-		return qb.UpdateImages(galleryID, newIDs)
+		return qb.UpdateImages(ctx, galleryID, newIDs)
 	}); err != nil {
 		return false, err
 	}
@@ -566,7 +436,7 @@ func (r *mutationResolver) AddGalleryImages(ctx context.Context, input models.Ga
 	return true, nil
 }

-func (r *mutationResolver) RemoveGalleryImages(ctx context.Context, input models.GalleryRemoveInput) (bool, error) {
+func (r *mutationResolver) RemoveGalleryImages(ctx context.Context, input GalleryRemoveInput) (bool, error) {
 	galleryID, err := strconv.Atoi(input.GalleryID)
 	if err != nil {
 		return false, err
@@ -577,9 +447,9 @@ func (r *mutationResolver) RemoveGalleryImages(ctx context.Context, input models
 		return false, err
 	}

-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		qb := repo.Gallery()
-		gallery, err := qb.Find(galleryID)
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.Gallery
+		gallery, err := qb.Find(ctx, galleryID)
 		if err != nil {
 			return err
 		}
@@ -588,17 +458,13 @@ func (r *mutationResolver) RemoveGalleryImages(ctx context.Context, input models
 			return errors.New("gallery not found")
 		}

-		if gallery.Zip {
-			return errors.New("cannot modify zip gallery images")
-		}
-
-		newIDs, err := qb.GetImageIDs(galleryID)
+		newIDs, err := qb.GetImageIDs(ctx, galleryID)
 		if err != nil {
 			return err
 		}

 		newIDs = intslice.IntExclude(newIDs, imageIDs)
-		return qb.UpdateImages(galleryID, newIDs)
+		return qb.UpdateImages(ctx, galleryID, newIDs)
 	}); err != nil {
 		return false, err
 	}
diff --git a/internal/api/resolver_mutation_image.go b/internal/api/resolver_mutation_image.go
index 4c05c0ee6..419b6132b 100644
--- a/internal/api/resolver_mutation_image.go
+++ b/internal/api/resolver_mutation_image.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 	"strconv"
-	"time"

 	"github.com/stashapp/stash/internal/manager"
 	"github.com/stashapp/stash/pkg/file"
@@ -16,8 +15,8 @@ import (
 )

 func (r *mutationResolver) getImage(ctx context.Context, id int) (ret *models.Image, err error) {
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
-		ret, err = repo.Image().Find(id)
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		ret, err = r.repository.Image.Find(ctx, id)
 		return err
 	}); err != nil {
 		return nil, err
@@ -26,14 +25,14 @@ func (r *mutationResolver) getImage(ctx context.Context, id int) (ret *models.Im
 	return ret, nil
 }

-func (r *mutationResolver) ImageUpdate(ctx context.Context, input models.ImageUpdateInput) (ret *models.Image, err error) {
+func (r *mutationResolver) ImageUpdate(ctx context.Context, input ImageUpdateInput) (ret *models.Image, err error) {
 	translator := changesetTranslator{
 		inputMap: getUpdateInputMap(ctx),
 	}

 	// Start the transaction and save the image
-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		ret, err = r.imageUpdate(input, translator, repo)
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		ret, err = r.imageUpdate(ctx, input, translator)
 		return err
 	}); err != nil {
 		return nil, err
@@ -44,17 +43,17 @@ func (r *mutationResolver) ImageUpdate(ctx context.Context, input models.ImageUp
 	return r.getImage(ctx, ret.ID)
 }

-func (r *mutationResolver) ImagesUpdate(ctx context.Context, input []*models.ImageUpdateInput) (ret []*models.Image, err error) {
+func (r *mutationResolver) ImagesUpdate(ctx context.Context, input []*ImageUpdateInput) (ret []*models.Image, err error) {
 	inputMaps := getUpdateInputMaps(ctx)

 	// Start the transaction and save the image
-	if err := r.withTxn(ctx, func(repo models.Repository) error {
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
 		for i, image := range input {
 			translator := changesetTranslator{
 				inputMap: inputMaps[i],
 			}

-			thisImage, err := r.imageUpdate(*image, translator, repo)
+			thisImage, err := r.imageUpdate(ctx, *image, translator)
 			if err != nil {
 				return err
 			}
@@ -86,148 +85,105 @@ func (r *mutationResolver) ImagesUpdate(ctx context.Context, input []*models.Ima
 	return newRet, nil
 }

-func (r *mutationResolver) imageUpdate(input models.ImageUpdateInput, translator changesetTranslator, repo models.Repository) (*models.Image, error) {
+func (r *mutationResolver) imageUpdate(ctx context.Context, input ImageUpdateInput, translator changesetTranslator) (*models.Image, error) {
 	// Populate image from the input
 	imageID, err := strconv.Atoi(input.ID)
 	if err != nil {
 		return nil, err
 	}

-	updatedTime := time.Now()
-	updatedImage := models.ImagePartial{
-		ID:        imageID,
-		UpdatedAt: &models.SQLiteTimestamp{Timestamp: updatedTime},
-	}
-
-	updatedImage.Title = translator.nullString(input.Title, "title")
-	updatedImage.Rating = translator.nullInt64(input.Rating, "rating")
-	updatedImage.StudioID = translator.nullInt64FromString(input.StudioID, "studio_id")
-	updatedImage.Organized = input.Organized
-
-	qb := repo.Image()
-	image, err := qb.Update(updatedImage)
+	updatedImage := models.NewImagePartial()
+	updatedImage.Title = translator.optionalString(input.Title, "title")
+	updatedImage.Rating = translator.optionalInt(input.Rating, "rating")
+	updatedImage.StudioID, err = translator.optionalIntFromString(input.StudioID, "studio_id")
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("converting studio id: %w", err)
 	}
+	updatedImage.Organized = translator.optionalBool(input.Organized, "organized")

 	if translator.hasField("gallery_ids") {
-		if err := r.updateImageGalleries(qb, imageID, input.GalleryIds); err != nil {
-			return nil, err
+		updatedImage.GalleryIDs, err = translateUpdateIDs(input.GalleryIds, models.RelationshipUpdateModeSet)
+		if err != nil {
+			return nil, fmt.Errorf("converting gallery ids: %w", err)
 		}
 	}

-	// Save the performers
 	if translator.hasField("performer_ids") {
-		if err := r.updateImagePerformers(qb, imageID, input.PerformerIds); err != nil {
-			return nil, err
+		updatedImage.PerformerIDs, err = translateUpdateIDs(input.PerformerIds, models.RelationshipUpdateModeSet)
+		if err != nil {
+			return nil, fmt.Errorf("converting performer ids: %w", err)
 		}
 	}

-	// Save the tags
 	if translator.hasField("tag_ids") {
-		if err := r.updateImageTags(qb, imageID, input.TagIds); err != nil {
-			return nil, err
+		updatedImage.TagIDs, err = translateUpdateIDs(input.TagIds, models.RelationshipUpdateModeSet)
+		if err != nil {
+			return nil, fmt.Errorf("converting tag ids: %w", err)
 		}
 	}

+	qb := r.repository.Image
+	image, err := qb.UpdatePartial(ctx, imageID, updatedImage)
+	if err != nil {
+		return nil, err
+	}
+
 	return image, nil
 }

-func (r *mutationResolver) updateImageGalleries(qb models.ImageReaderWriter, imageID int, galleryIDs []string) error {
-	ids, err := stringslice.StringSliceToIntSlice(galleryIDs)
-	if err != nil {
-		return err
-	}
-	return qb.UpdateGalleries(imageID, ids)
-}
-
-func (r *mutationResolver) updateImagePerformers(qb models.ImageReaderWriter, imageID int, performerIDs []string) error {
-	ids, err := stringslice.StringSliceToIntSlice(performerIDs)
-	if err != nil {
-		return err
-	}
-	return qb.UpdatePerformers(imageID, ids)
-}
-
-func (r *mutationResolver) updateImageTags(qb models.ImageReaderWriter, imageID int, tagsIDs []string) error {
-	ids, err := stringslice.StringSliceToIntSlice(tagsIDs)
-	if err != nil {
-		return err
-	}
-	return qb.UpdateTags(imageID, ids)
-}
-
-func (r *mutationResolver) BulkImageUpdate(ctx context.Context, input models.BulkImageUpdateInput) (ret []*models.Image, err error) {
+func (r *mutationResolver) BulkImageUpdate(ctx context.Context, input BulkImageUpdateInput) (ret []*models.Image, err error) {
 	imageIDs, err := stringslice.StringSliceToIntSlice(input.Ids)
 	if err != nil {
 		return nil, err
 	}

 	// Populate image from the input
-	updatedTime := time.Now()
-
-	updatedImage := models.ImagePartial{
-		UpdatedAt: &models.SQLiteTimestamp{Timestamp: updatedTime},
-	}
+	updatedImage := models.NewImagePartial()

 	translator := changesetTranslator{
 		inputMap: getUpdateInputMap(ctx),
 	}

-	updatedImage.Title = translator.nullString(input.Title, "title")
-	updatedImage.Rating = translator.nullInt64(input.Rating, "rating")
-	updatedImage.StudioID = translator.nullInt64FromString(input.StudioID, "studio_id")
-	updatedImage.Organized = input.Organized
+	updatedImage.Title = translator.optionalString(input.Title, "title")
+	updatedImage.Rating = translator.optionalInt(input.Rating, "rating")
+	updatedImage.StudioID, err = translator.optionalIntFromString(input.StudioID, "studio_id")
+	if err != nil {
+		return nil, fmt.Errorf("converting studio id: %w", err)
+	}
+	updatedImage.Organized = translator.optionalBool(input.Organized, "organized")
+
+	if translator.hasField("gallery_ids") {
+		updatedImage.GalleryIDs, err = translateUpdateIDs(input.GalleryIds.Ids, input.GalleryIds.Mode)
+		if err != nil {
+			return nil, fmt.Errorf("converting gallery ids: %w", err)
+		}
+	}
+
+	if translator.hasField("performer_ids") {
+		updatedImage.PerformerIDs, err = translateUpdateIDs(input.PerformerIds.Ids, input.PerformerIds.Mode)
+		if err != nil {
+			return nil, fmt.Errorf("converting performer ids: %w", err)
+		}
+	}
+
+	if translator.hasField("tag_ids") {
+		updatedImage.TagIDs, err = translateUpdateIDs(input.TagIds.Ids, input.TagIds.Mode)
+		if err != nil {
+			return nil, fmt.Errorf("converting tag ids: %w", err)
+		}
+	}

 	// Start the transaction and save the image marker
-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		qb := repo.Image()
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.Image

 		for _, imageID := range imageIDs {
-			updatedImage.ID = imageID
-
-			image, err := qb.Update(updatedImage)
+			image, err := qb.UpdatePartial(ctx, imageID, updatedImage)
 			if err != nil {
 				return err
 			}

 			ret = append(ret, image)
-
-			// Save the galleries
-			if translator.hasField("gallery_ids") {
-				galleryIDs, err := adjustImageGalleryIDs(qb, imageID, *input.GalleryIds)
-				if err != nil {
-					return err
-				}
-
-				if err := qb.UpdateGalleries(imageID, galleryIDs); err != nil {
-					return err
-				}
-			}
-
-			// Save the performers
-			if translator.hasField("performer_ids") {
-				performerIDs, err := adjustImagePerformerIDs(qb, imageID, *input.PerformerIds)
-				if err != nil {
-					return err
-				}
-
-				if err := qb.UpdatePerformers(imageID, performerIDs); err != nil {
-					return err
-				}
-			}
-
-			// Save the tags
-			if translator.hasField("tag_ids") {
-				tagIDs, err := adjustImageTagIDs(qb, imageID, *input.TagIds)
-				if err != nil {
-					return err
-				}
-
-				if err := qb.UpdateTags(imageID, tagIDs); err != nil {
-					return err
-				}
-			}
 		}

 		return nil
@@ -251,33 +207,6 @@ func (r *mutationResolver) BulkImageUpdate(ctx context.Context, input models.Bul
 	return newRet, nil
 }

-func adjustImageGalleryIDs(qb models.ImageReader, imageID int, ids models.BulkUpdateIds) (ret []int, err error) {
-	ret, err = qb.GetGalleryIDs(imageID)
-	if err != nil {
-		return nil, err
-	}
-
-	return adjustIDs(ret, ids), nil
-}
-
-func adjustImagePerformerIDs(qb models.ImageReader, imageID int, ids models.BulkUpdateIds) (ret []int, err error) {
-	ret, err = qb.GetPerformerIDs(imageID)
-	if err != nil {
-		return nil, err
-	}
-
-	return adjustIDs(ret, ids), nil
-}
-
-func adjustImageTagIDs(qb models.ImageReader, imageID int, ids models.BulkUpdateIds) (ret []int, err error) {
-	ret, err = qb.GetTagIDs(imageID)
-	if err != nil {
-		return nil, err
-	}
-
-	return adjustIDs(ret, ids), nil
-}
-
 func (r *mutationResolver) ImageDestroy(ctx context.Context, input models.ImageDestroyInput) (ret bool, err error) {
 	imageID, err := strconv.Atoi(input.ID)
 	if err != nil {
@@ -286,13 +215,11 @@ func (r *mutationResolver) ImageDestroy(ctx context.Context, input models.ImageD

 	var i *models.Image
 	fileDeleter := &image.FileDeleter{
-		Deleter: *file.NewDeleter(),
+		Deleter: file.NewDeleter(),
 		Paths:   manager.GetInstance().Paths,
 	}

-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		qb := repo.Image()
-
-		i, err = qb.Find(imageID)
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		i, err = r.repository.Image.Find(ctx, imageID)
 		if err != nil {
 			return err
 		}
@@ -301,7 +228,7 @@ func (r *mutationResolver) ImageDestroy(ctx context.Context, input models.ImageD
 			return fmt.Errorf("image with id %d not found", imageID)
 		}

-		return image.Destroy(i, qb, fileDeleter, utils.IsTrue(input.DeleteGenerated), utils.IsTrue(input.DeleteFile))
+		return r.imageService.Destroy(ctx, i, fileDeleter, utils.IsTrue(input.DeleteGenerated), utils.IsTrue(input.DeleteFile))
 	}); err != nil {
 		fileDeleter.Rollback()
 		return false, err
@@ -328,15 +255,14 @@ func (r *mutationResolver) ImagesDestroy(ctx context.Context, input models.Image
 	var images []*models.Image
 	fileDeleter := &image.FileDeleter{
-		Deleter: *file.NewDeleter(),
+		Deleter: file.NewDeleter(),
 		Paths:   manager.GetInstance().Paths,
 	}

-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		qb := repo.Image()
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.Image

 		for _, imageID := range imageIDs {
-
-			i, err := qb.Find(imageID)
+			i, err := qb.Find(ctx, imageID)
 			if err != nil {
 				return err
 			}
@@ -347,7 +273,7 @@ func (r *mutationResolver) ImagesDestroy(ctx context.Context, input models.Image

 			images = append(images, i)

-			if err := image.Destroy(i, qb, fileDeleter, utils.IsTrue(input.DeleteGenerated), utils.IsTrue(input.DeleteFile)); err != nil {
+			if err := r.imageService.Destroy(ctx, i, fileDeleter, utils.IsTrue(input.DeleteGenerated), utils.IsTrue(input.DeleteFile)); err != nil {
 				return err
 			}
 		}
@@ -379,10 +305,10 @@ func (r *mutationResolver) ImageIncrementO(ctx context.Context, id string) (ret
 		return 0, err
 	}

-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		qb := repo.Image()
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.Image

-		ret, err = qb.IncrementOCounter(imageID)
+		ret, err = qb.IncrementOCounter(ctx, imageID)
 		return err
 	}); err != nil {
 		return 0, err
@@ -397,10 +323,10 @@ func (r *mutationResolver) ImageDecrementO(ctx context.Context, id string) (ret
 		return 0, err
 	}

-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		qb := repo.Image()
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.Image

-		ret, err = qb.DecrementOCounter(imageID)
+		ret, err = qb.DecrementOCounter(ctx, imageID)
 		return err
 	}); err != nil {
 		return 0, err
@@ -415,10 +341,10 @@ func (r *mutationResolver) ImageResetO(ctx context.Context, id string) (ret int,
 		return 0, err
 	}

-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		qb := repo.Image()
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.Image

-		ret, err = qb.ResetOCounter(imageID)
+		ret, err = qb.ResetOCounter(ctx, imageID)
 		return err
 	}); err != nil {
 		return 0, err
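models.NewImagePartial and the translator.optional* helpers replace the old sql.Null* fields. The Optional types themselves are not defined in this diff, but the pattern they imply is a value paired with a Set flag, so "absent from the changeset" is distinguishable from "set to the zero value". A sketch under that assumption (field names here are illustrative):

package main

import "fmt"

// OptionalString is an assumed three-state-style field: when Set is false,
// the field was not present in the GraphQL changeset at all.
type OptionalString struct {
	Value string
	Set   bool
}

func NewOptionalString(v string) OptionalString { return OptionalString{Value: v, Set: true} }

type ImagePartial struct {
	Title OptionalString
}

// applyPartial shows how an UpdatePartial-style method can skip untouched fields.
func applyPartial(current string, p ImagePartial) string {
	if p.Title.Set {
		return p.Title.Value
	}
	return current // field untouched when not in the changeset
}

func main() {
	fmt.Println(applyPartial("old", ImagePartial{}))                               // old
	fmt.Println(applyPartial("old", ImagePartial{Title: NewOptionalString("new")})) // new
}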
diff --git a/internal/api/resolver_mutation_metadata.go b/internal/api/resolver_mutation_metadata.go
index b8154614f..ff8635536 100644
--- a/internal/api/resolver_mutation_metadata.go
+++ b/internal/api/resolver_mutation_metadata.go
@@ -9,15 +9,14 @@ import (
 	"sync"
 	"time"

+	"github.com/stashapp/stash/internal/identify"
 	"github.com/stashapp/stash/internal/manager"
 	"github.com/stashapp/stash/internal/manager/config"
-	"github.com/stashapp/stash/pkg/database"
 	"github.com/stashapp/stash/pkg/fsutil"
 	"github.com/stashapp/stash/pkg/logger"
-	"github.com/stashapp/stash/pkg/models"
 )

-func (r *mutationResolver) MetadataScan(ctx context.Context, input models.ScanMetadataInput) (string, error) {
+func (r *mutationResolver) MetadataScan(ctx context.Context, input manager.ScanMetadataInput) (string, error) {
 	jobID, err := manager.GetInstance().Scan(ctx, input)

 	if err != nil {
@@ -36,7 +35,7 @@ func (r *mutationResolver) MetadataImport(ctx context.Context) (string, error) {
 	return strconv.Itoa(jobID), nil
 }

-func (r *mutationResolver) ImportObjects(ctx context.Context, input models.ImportObjectsInput) (string, error) {
+func (r *mutationResolver) ImportObjects(ctx context.Context, input manager.ImportObjectsInput) (string, error) {
 	t, err := manager.CreateImportTask(config.GetInstance().GetVideoFileNamingAlgorithm(), input)
 	if err != nil {
 		return "", err
@@ -56,7 +55,7 @@ func (r *mutationResolver) MetadataExport(ctx context.Context) (string, error) {
 	return strconv.Itoa(jobID), nil
 }

-func (r *mutationResolver) ExportObjects(ctx context.Context, input models.ExportObjectsInput) (*string, error) {
+func (r *mutationResolver) ExportObjects(ctx context.Context, input manager.ExportObjectsInput) (*string, error) {
 	t := manager.CreateExportTask(config.GetInstance().GetVideoFileNamingAlgorithm(), input)

 	var wg sync.WaitGroup
@@ -75,7 +74,7 @@ func (r *mutationResolver) ExportObjects(ctx context.Context, input models.Expor
 	return nil, nil
 }

-func (r *mutationResolver) MetadataGenerate(ctx context.Context, input models.GenerateMetadataInput) (string, error) {
+func (r *mutationResolver) MetadataGenerate(ctx context.Context, input manager.GenerateMetadataInput) (string, error) {
 	jobID, err := manager.GetInstance().Generate(ctx, input)

 	if err != nil {
@@ -85,19 +84,19 @@ func (r *mutationResolver) MetadataGenerate(ctx context.Context, input models.Ge
 	return strconv.Itoa(jobID), nil
 }

-func (r *mutationResolver) MetadataAutoTag(ctx context.Context, input models.AutoTagMetadataInput) (string, error) {
+func (r *mutationResolver) MetadataAutoTag(ctx context.Context, input manager.AutoTagMetadataInput) (string, error) {
 	jobID := manager.GetInstance().AutoTag(ctx, input)
 	return strconv.Itoa(jobID), nil
 }

-func (r *mutationResolver) MetadataIdentify(ctx context.Context, input models.IdentifyMetadataInput) (string, error) {
+func (r *mutationResolver) MetadataIdentify(ctx context.Context, input identify.Options) (string, error) {
 	t := manager.CreateIdentifyJob(input)
 	jobID := manager.GetInstance().JobManager.Add(ctx, "Identifying...", t)

 	return strconv.Itoa(jobID), nil
 }

-func (r *mutationResolver) MetadataClean(ctx context.Context, input models.CleanMetadataInput) (string, error) {
+func (r *mutationResolver) MetadataClean(ctx context.Context, input manager.CleanMetadataInput) (string, error) {
 	jobID := manager.GetInstance().Clean(ctx, input)
 	return strconv.Itoa(jobID), nil
 }
@@ -107,10 +106,11 @@ func (r *mutationResolver) MigrateHashNaming(ctx context.Context) (string, error
 	return strconv.Itoa(jobID), nil
 }

-func (r *mutationResolver) BackupDatabase(ctx context.Context, input models.BackupDatabaseInput) (*string, error) {
+func (r *mutationResolver) BackupDatabase(ctx context.Context, input BackupDatabaseInput) (*string, error) {
 	// if download is true, then backup to temporary file and return a link
 	download := input.Download != nil && *input.Download
 	mgr := manager.GetInstance()
+	database := mgr.Database
 	var backupPath string
 	if download {
 		if err := fsutil.EnsureDir(mgr.Paths.Generated.Downloads); err != nil {
@@ -127,7 +127,7 @@ func (r *mutationResolver) BackupDatabase(ctx context.Context, input models.Back
 		backupPath = database.DatabaseBackupPath()
 	}

-	err := database.Backup(database.DB, backupPath)
+	err := database.Backup(backupPath)
 	if err != nil {
 		return nil, err
 	}
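Backup becomes a method on the manager's Database instance rather than a package function that is handed a *sql.DB. The implementation is not part of this diff; for a SQLite-backed store, one common approach is SQLite's VACUUM INTO (available from SQLite 3.27), sketched here under that assumption:

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/mattn/go-sqlite3"
)

// Backup is a sketch, not the actual stash implementation: VACUUM INTO
// writes a consistent copy of the live database to backupPath.
func Backup(db *sql.DB, backupPath string) error {
	_, err := db.Exec("VACUUM INTO ?", backupPath)
	return err
}

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	fmt.Println(Backup(db, "/tmp/stash-backup.sqlite"))
}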
diff --git a/internal/api/resolver_mutation_movie.go b/internal/api/resolver_mutation_movie.go
index 325d022d3..0a22350b6 100644
--- a/internal/api/resolver_mutation_movie.go
+++ b/internal/api/resolver_mutation_movie.go
@@ -15,8 +15,8 @@ import (
 )

 func (r *mutationResolver) getMovie(ctx context.Context, id int) (ret *models.Movie, err error) {
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
-		ret, err = repo.Movie().Find(id)
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		ret, err = r.repository.Movie.Find(ctx, id)
 		return err
 	}); err != nil {
 		return nil, err
@@ -25,7 +25,7 @@ func (r *mutationResolver) getMovie(ctx context.Context, id int) (ret *models.Mo
 	return ret, nil
 }

-func (r *mutationResolver) MovieCreate(ctx context.Context, input models.MovieCreateInput) (*models.Movie, error) {
+func (r *mutationResolver) MovieCreate(ctx context.Context, input MovieCreateInput) (*models.Movie, error) {
 	// generate checksum from movie name rather than image
 	checksum := md5.FromString(input.Name)

@@ -100,16 +100,16 @@ func (r *mutationResolver) MovieCreate(ctx context.Context, input models.MovieCr
 	// Start the transaction and save the movie
 	var movie *models.Movie
-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		qb := repo.Movie()
-		movie, err = qb.Create(newMovie)
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.Movie
+		movie, err = qb.Create(ctx, newMovie)
 		if err != nil {
 			return err
 		}

 		// update image table
 		if len(frontimageData) > 0 {
-			if err := qb.UpdateImages(movie.ID, frontimageData, backimageData); err != nil {
+			if err := qb.UpdateImages(ctx, movie.ID, frontimageData, backimageData); err != nil {
 				return err
 			}
 		}
@@ -123,7 +123,7 @@ func (r *mutationResolver) MovieCreate(ctx context.Context, input models.MovieCr
 	return r.getMovie(ctx, movie.ID)
 }

-func (r *mutationResolver) MovieUpdate(ctx context.Context, input models.MovieUpdateInput) (*models.Movie, error) {
+func (r *mutationResolver) MovieUpdate(ctx context.Context, input MovieUpdateInput) (*models.Movie, error) {
 	// Populate movie from the input
 	movieID, err := strconv.Atoi(input.ID)
 	if err != nil {
@@ -174,9 +174,9 @@ func (r *mutationResolver) MovieUpdate(ctx context.Context, input models.MovieUp
 	// Start the transaction and save the movie
 	var movie *models.Movie
-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		qb := repo.Movie()
-		movie, err = qb.Update(updatedMovie)
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.Movie
+		movie, err = qb.Update(ctx, updatedMovie)
 		if err != nil {
 			return err
 		}
@@ -184,13 +184,13 @@ func (r *mutationResolver) MovieUpdate(ctx context.Context, input models.MovieUp
 		// update image table
 		if frontImageIncluded || backImageIncluded {
 			if !frontImageIncluded {
-				frontimageData, err = qb.GetFrontImage(updatedMovie.ID)
+				frontimageData, err = qb.GetFrontImage(ctx, updatedMovie.ID)
 				if err != nil {
 					return err
 				}
 			}
 			if !backImageIncluded {
-				backimageData, err = qb.GetBackImage(updatedMovie.ID)
+				backimageData, err = qb.GetBackImage(ctx, updatedMovie.ID)
 				if err != nil {
 					return err
 				}
@@ -198,7 +198,7 @@ func (r *mutationResolver) MovieUpdate(ctx context.Context, input models.MovieUp

 			if len(frontimageData) == 0 && len(backimageData) == 0 {
 				// both images are being nulled. Destroy them.
-				if err := qb.DestroyImages(movie.ID); err != nil {
+				if err := qb.DestroyImages(ctx, movie.ID); err != nil {
 					return err
 				}
 			} else {
@@ -208,7 +208,7 @@ func (r *mutationResolver) MovieUpdate(ctx context.Context, input models.MovieUp
 					frontimageData, _ = utils.ProcessImageInput(ctx, models.DefaultMovieImage)
 				}

-				if err := qb.UpdateImages(movie.ID, frontimageData, backimageData); err != nil {
+				if err := qb.UpdateImages(ctx, movie.ID, frontimageData, backimageData); err != nil {
 					return err
 				}
 			}
@@ -223,7 +223,7 @@ func (r *mutationResolver) MovieUpdate(ctx context.Context, input models.MovieUp
 	return r.getMovie(ctx, movie.ID)
 }

-func (r *mutationResolver) BulkMovieUpdate(ctx context.Context, input models.BulkMovieUpdateInput) ([]*models.Movie, error) {
+func (r *mutationResolver) BulkMovieUpdate(ctx context.Context, input BulkMovieUpdateInput) ([]*models.Movie, error) {
 	movieIDs, err := stringslice.StringSliceToIntSlice(input.Ids)
 	if err != nil {
 		return nil, err
@@ -245,13 +245,13 @@ func (r *mutationResolver) BulkMovieUpdate(ctx context.Context, input models.Bul

 	ret := []*models.Movie{}

-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		qb := repo.Movie()
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.Movie

 		for _, movieID := range movieIDs {
 			updatedMovie.ID = movieID

-			existing, err := qb.Find(movieID)
+			existing, err := qb.Find(ctx, movieID)
 			if err != nil {
 				return err
 			}
@@ -260,7 +260,7 @@ func (r *mutationResolver) BulkMovieUpdate(ctx context.Context, input models.Bul
 				return fmt.Errorf("movie with id %d not found", movieID)
 			}

-			movie, err := qb.Update(updatedMovie)
+			movie, err := qb.Update(ctx, updatedMovie)
 			if err != nil {
 				return err
 			}
@@ -288,14 +288,14 @@ func (r *mutationResolver) BulkMovieUpdate(ctx context.Context, input models.Bul
 	return newRet, nil
 }

-func (r *mutationResolver) MovieDestroy(ctx context.Context, input models.MovieDestroyInput) (bool, error) {
+func (r *mutationResolver) MovieDestroy(ctx context.Context, input MovieDestroyInput) (bool, error) {
 	id, err := strconv.Atoi(input.ID)
 	if err != nil {
 		return false, err
 	}

-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		return repo.Movie().Destroy(id)
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		return r.repository.Movie.Destroy(ctx, id)
 	}); err != nil {
 		return false, err
 	}
@@ -311,10 +311,10 @@ func (r *mutationResolver) MoviesDestroy(ctx context.Context, movieIDs []string)
 		return false, err
 	}

-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		qb := repo.Movie()
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.Movie
 		for _, id := range ids {
-			if err := qb.Destroy(id); err != nil {
+			if err := qb.Destroy(ctx, id); err != nil {
 				return err
 			}
 		}
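Across the movie resolver the repository now takes a leading context on every method, which is what lets the context-carried transaction from withTxn reach the query builder. A trimmed, illustrative slice of such a repository surface — the names and the in-memory implementation are assumptions for demonstration, not the actual stash interfaces:

package main

import (
	"context"
	"fmt"
)

type Movie struct {
	ID   int
	Name string
}

// MovieFinder shows the ctx-first method shape the resolvers now depend on.
type MovieFinder interface {
	Find(ctx context.Context, id int) (*Movie, error)
}

// mapRepo is a toy in-memory implementation; a real one would pull the
// transaction out of ctx before querying.
type mapRepo map[int]*Movie

func (m mapRepo) Find(ctx context.Context, id int) (*Movie, error) {
	return m[id], nil
}

func main() {
	var qb MovieFinder = mapRepo{1: {ID: 1, Name: "example"}}
	m, _ := qb.Find(context.Background(), 1)
	fmt.Println(m.Name)
}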
diff --git a/internal/api/resolver_mutation_performer.go b/internal/api/resolver_mutation_performer.go
index cf6335659..b0ab18852 100644
--- a/internal/api/resolver_mutation_performer.go
+++ b/internal/api/resolver_mutation_performer.go
@@ -16,8 +16,8 @@ import (
 )

 func (r *mutationResolver) getPerformer(ctx context.Context, id int) (ret *models.Performer, err error) {
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
-		ret, err = repo.Performer().Find(id)
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		ret, err = r.repository.Performer.Find(ctx, id)
 		return err
 	}); err != nil {
 		return nil, err
@@ -26,7 +26,17 @@ func (r *mutationResolver) getPerformer(ctx context.Context, id int) (ret *model
 	return ret, nil
 }

-func (r *mutationResolver) PerformerCreate(ctx context.Context, input models.PerformerCreateInput) (*models.Performer, error) {
+func stashIDPtrSliceToSlice(v []*models.StashID) []models.StashID {
+	ret := make([]models.StashID, len(v))
+	for i, vv := range v {
+		c := vv
+		ret[i] = *c
+	}
+
+	return ret
+}
+
+func (r *mutationResolver) PerformerCreate(ctx context.Context, input PerformerCreateInput) (*models.Performer, error) {
 	// generate checksum from performer name rather than image
 	checksum := md5.FromString(input.Name)

@@ -129,31 +139,31 @@ func (r *mutationResolver) PerformerCreate(ctx context.Context, input models.Per
 	// Start the transaction and save the performer
 	var performer *models.Performer
-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		qb := repo.Performer()
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.Performer

-		performer, err = qb.Create(newPerformer)
+		performer, err = qb.Create(ctx, newPerformer)
 		if err != nil {
 			return err
 		}

 		if len(input.TagIds) > 0 {
-			if err := r.updatePerformerTags(qb, performer.ID, input.TagIds); err != nil {
+			if err := r.updatePerformerTags(ctx, performer.ID, input.TagIds); err != nil {
 				return err
 			}
 		}

 		// update image table
 		if len(imageData) > 0 {
-			if err := qb.UpdateImage(performer.ID, imageData); err != nil {
+			if err := qb.UpdateImage(ctx, performer.ID, imageData); err != nil {
 				return err
 			}
 		}

 		// Save the stash_ids
 		if input.StashIds != nil {
-			stashIDJoins := models.StashIDsFromInput(input.StashIds)
-			if err := qb.UpdateStashIDs(performer.ID, stashIDJoins); err != nil {
+			stashIDJoins := stashIDPtrSliceToSlice(input.StashIds)
+			if err := qb.UpdateStashIDs(ctx, performer.ID, stashIDJoins); err != nil {
 				return err
 			}
 		}
@@ -167,7 +177,7 @@ func (r *mutationResolver) PerformerCreate(ctx context.Context, input models.Per
 	return r.getPerformer(ctx, performer.ID)
 }

-func (r *mutationResolver) PerformerUpdate(ctx context.Context, input models.PerformerUpdateInput) (*models.Performer, error) {
+func (r *mutationResolver) PerformerUpdate(ctx context.Context, input PerformerUpdateInput) (*models.Performer, error) {
 	// Populate performer from the input
 	performerID, _ := strconv.Atoi(input.ID)
 	updatedPerformer := models.PerformerPartial{
@@ -230,11 +240,11 @@ func (r *mutationResolver) PerformerUpdate(ctx context.Context, input models.Per
 	// Start the transaction and save the p
 	var p *models.Performer
-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		qb := repo.Performer()
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.Performer

 		// need to get existing performer
-		existing, err := qb.Find(updatedPerformer.ID)
+		existing, err := qb.Find(ctx, updatedPerformer.ID)
 		if err != nil {
 			return err
 		}
@@ -249,34 +259,34 @@ func (r *mutationResolver) PerformerUpdate(ctx context.Context, input models.Per
 			}
 		}

-		p, err = qb.Update(updatedPerformer)
+		p, err = qb.Update(ctx, updatedPerformer)
 		if err != nil {
 			return err
 		}

 		// Save the tags
 		if translator.hasField("tag_ids") {
-			if err := r.updatePerformerTags(qb, p.ID, input.TagIds); err != nil {
+			if err := r.updatePerformerTags(ctx, p.ID, input.TagIds); err != nil {
 				return err
 			}
 		}

 		// update image table
 		if len(imageData) > 0 {
-			if err := qb.UpdateImage(p.ID, imageData); err != nil {
+			if err := qb.UpdateImage(ctx, p.ID, imageData); err != nil {
 				return err
 			}
 		} else if imageIncluded {
 			// must be unsetting
-			if err := qb.DestroyImage(p.ID); err != nil {
+			if err := qb.DestroyImage(ctx, p.ID); err != nil {
 				return err
 			}
 		}

 		// Save the stash_ids
 		if translator.hasField("stash_ids") {
-			stashIDJoins := models.StashIDsFromInput(input.StashIds)
-			if err := qb.UpdateStashIDs(performerID, stashIDJoins); err != nil {
+			stashIDJoins := stashIDPtrSliceToSlice(input.StashIds)
+			if err := qb.UpdateStashIDs(ctx, performerID, stashIDJoins); err != nil {
 				return err
 			}
 		}
@@ -290,15 +300,15 @@ func (r *mutationResolver) PerformerUpdate(ctx context.Context, input models.Per
 	return r.getPerformer(ctx, p.ID)
 }

-func (r *mutationResolver) updatePerformerTags(qb models.PerformerReaderWriter, performerID int, tagsIDs []string) error {
+func (r *mutationResolver) updatePerformerTags(ctx context.Context, performerID int, tagsIDs []string) error {
 	ids, err := stringslice.StringSliceToIntSlice(tagsIDs)
 	if err != nil {
 		return err
 	}
-	return qb.UpdateTags(performerID, ids)
+	return r.repository.Performer.UpdateTags(ctx, performerID, ids)
 }

-func (r *mutationResolver) BulkPerformerUpdate(ctx context.Context, input models.BulkPerformerUpdateInput) ([]*models.Performer, error) {
+func (r *mutationResolver) BulkPerformerUpdate(ctx context.Context, input BulkPerformerUpdateInput) ([]*models.Performer, error) {
 	performerIDs, err := stringslice.StringSliceToIntSlice(input.Ids)
 	if err != nil {
 		return nil, err
@@ -348,14 +358,14 @@ func (r *mutationResolver) BulkPerformerUpdate(ctx context.Context, input models
 	ret := []*models.Performer{}

 	// Start the transaction and save the scene marker
-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		qb := repo.Performer()
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.Performer

 		for _, performerID := range performerIDs {
 			updatedPerformer.ID = performerID

 			// need to get existing performer
-			existing, err := qb.Find(performerID)
+			existing, err := qb.Find(ctx, performerID)
 			if err != nil {
 				return err
 			}
@@ -368,7 +378,7 @@ func (r *mutationResolver) BulkPerformerUpdate(ctx context.Context, input models
 				return err
 			}

-			performer, err := qb.Update(updatedPerformer)
+			performer, err := qb.Update(ctx, updatedPerformer)
 			if err != nil {
 				return err
 			}
@@ -377,12 +387,12 @@ func (r *mutationResolver) BulkPerformerUpdate(ctx context.Context, input models

 			// Save the tags
 			if translator.hasField("tag_ids") {
-				tagIDs, err := adjustTagIDs(qb, performerID, *input.TagIds)
+				tagIDs, err := adjustTagIDs(ctx, qb, performerID, *input.TagIds)
 				if err != nil {
 					return err
 				}

-				if err := qb.UpdateTags(performerID, tagIDs); err != nil {
+				if err := qb.UpdateTags(ctx, performerID, tagIDs); err != nil {
 					return err
 				}
 			}
@@ -409,14 +419,14 @@ func (r *mutationResolver) BulkPerformerUpdate(ctx context.Context, input models
 	return newRet, nil
 }

-func (r *mutationResolver) PerformerDestroy(ctx context.Context, input models.PerformerDestroyInput) (bool, error) {
+func (r *mutationResolver) PerformerDestroy(ctx context.Context, input PerformerDestroyInput) (bool, error) {
 	id, err := strconv.Atoi(input.ID)
 	if err != nil {
 		return false, err
 	}

-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		return repo.Performer().Destroy(id)
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		return r.repository.Performer.Destroy(ctx, id)
 	}); err != nil {
 		return false, err
 	}
@@ -432,10 +442,10 @@ func (r *mutationResolver) PerformersDestroy(ctx context.Context, performerIDs [
 		return false, err
 	}

-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		qb := repo.Performer()
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.Performer
 		for _, id := range ids {
-			if err := qb.Destroy(id); err != nil {
+			if err := qb.Destroy(ctx, id); err != nil {
 				return err
 			}
 		}
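The new stashIDPtrSliceToSlice helper copies a []*models.StashID into a []models.StashID, taking an explicit copy of each element. If the module's toolchain allows generics (Go 1.18+ — an assumption; the helper above is written without them), the same conversion generalizes to:

package main

import "fmt"

type StashID struct {
	Endpoint string
	StashID  string
}

// derefSlice is a generic equivalent of stashIDPtrSliceToSlice; it assumes
// no element of the input slice is nil.
func derefSlice[T any](in []*T) []T {
	out := make([]T, len(in))
	for i, p := range in {
		out[i] = *p
	}
	return out
}

func main() {
	in := []*StashID{{Endpoint: "https://stashdb.example", StashID: "abc"}}
	fmt.Println(derefSlice(in)) // [{https://stashdb.example abc}]
}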
diff --git a/internal/api/resolver_mutation_plugin.go b/internal/api/resolver_mutation_plugin.go
index 48a2a29e2..58ad359b7 100644
--- a/internal/api/resolver_mutation_plugin.go
+++ b/internal/api/resolver_mutation_plugin.go
@@ -5,10 +5,10 @@ import (

 	"github.com/stashapp/stash/internal/manager"
 	"github.com/stashapp/stash/pkg/logger"
-	"github.com/stashapp/stash/pkg/models"
+	"github.com/stashapp/stash/pkg/plugin"
 )

-func (r *mutationResolver) RunPluginTask(ctx context.Context, pluginID string, taskName string, args []*models.PluginArgInput) (string, error) {
+func (r *mutationResolver) RunPluginTask(ctx context.Context, pluginID string, taskName string, args []*plugin.PluginArgInput) (string, error) {
 	m := manager.GetInstance()
 	m.RunPluginTask(ctx, pluginID, taskName, args)
 	return "todo", nil
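RunPluginTask's argument type moves from pkg/models to pkg/plugin, keeping the GraphQL input next to the code that consumes it. A toy illustration of building such arguments; the field layout below is a simplified assumption, not the actual plugin.PluginArgInput definition:

package main

import "fmt"

// PluginValueInput and PluginArgInput are simplified stand-ins for the
// GraphQL input types that now live in pkg/plugin.
type PluginValueInput struct {
	Str *string
}

type PluginArgInput struct {
	Key   string
	Value *PluginValueInput
}

func main() {
	v := "high"
	args := []*PluginArgInput{{Key: "quality", Value: &PluginValueInput{Str: &v}}}
	for _, a := range args {
		fmt.Println(a.Key, "=", *a.Value.Str)
	}
}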
diff --git a/internal/api/resolver_mutation_saved_filter.go b/internal/api/resolver_mutation_saved_filter.go
index f8467cb5e..a995060ea 100644
--- a/internal/api/resolver_mutation_saved_filter.go
+++ b/internal/api/resolver_mutation_saved_filter.go
@@ -9,7 +9,7 @@ import (
 	"github.com/stashapp/stash/pkg/models"
 )

-func (r *mutationResolver) SaveFilter(ctx context.Context, input models.SaveFilterInput) (ret *models.SavedFilter, err error) {
+func (r *mutationResolver) SaveFilter(ctx context.Context, input SaveFilterInput) (ret *models.SavedFilter, err error) {
 	if strings.TrimSpace(input.Name) == "" {
 		return nil, errors.New("name must be non-empty")
 	}
@@ -23,17 +23,17 @@ func (r *mutationResolver) SaveFilter(ctx context.Context, input models.SaveFilt
 		id = &idv
 	}

-	if err := r.withTxn(ctx, func(repo models.Repository) error {
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
 		f := models.SavedFilter{
 			Mode:   input.Mode,
 			Name:   input.Name,
 			Filter: input.Filter,
 		}
 		if id == nil {
-			ret, err = repo.SavedFilter().Create(f)
+			ret, err = r.repository.SavedFilter.Create(ctx, f)
 		} else {
 			f.ID = *id
-			ret, err = repo.SavedFilter().Update(f)
+			ret, err = r.repository.SavedFilter.Update(ctx, f)
 		}
 		return err
 	}); err != nil {
@@ -42,14 +42,14 @@ func (r *mutationResolver) SaveFilter(ctx context.Context, input models.SaveFilt
 	return ret, err
 }

-func (r *mutationResolver) DestroySavedFilter(ctx context.Context, input models.DestroyFilterInput) (bool, error) {
+func (r *mutationResolver) DestroySavedFilter(ctx context.Context, input DestroyFilterInput) (bool, error) {
 	id, err := strconv.Atoi(input.ID)
 	if err != nil {
 		return false, err
 	}

-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		return repo.SavedFilter().Destroy(id)
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		return r.repository.SavedFilter.Destroy(ctx, id)
 	}); err != nil {
 		return false, err
 	}
@@ -57,25 +57,25 @@ func (r *mutationResolver) DestroySavedFilter(ctx context.Context, input models.
 	return true, nil
 }

-func (r *mutationResolver) SetDefaultFilter(ctx context.Context, input models.SetDefaultFilterInput) (bool, error) {
-	if err := r.withTxn(ctx, func(repo models.Repository) error {
-		qb := repo.SavedFilter()
+func (r *mutationResolver) SetDefaultFilter(ctx context.Context, input SetDefaultFilterInput) (bool, error) {
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.SavedFilter

 		if input.Filter == nil {
 			// clearing
-			def, err := qb.FindDefault(input.Mode)
+			def, err := qb.FindDefault(ctx, input.Mode)
 			if err != nil {
 				return err
 			}

 			if def != nil {
-				return qb.Destroy(def.ID)
+				return qb.Destroy(ctx, def.ID)
 			}

 			return nil
 		}

-		_, err := qb.SetDefault(models.SavedFilter{
+		_, err := qb.SetDefault(ctx, models.SavedFilter{
 			Mode:   input.Mode,
 			Filter: *input.Filter,
 		})
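SetDefaultFilter clears the stored default when the input filter is nil and upserts it otherwise. The same clear-or-set behaviour, condensed into an in-memory stand-in for the SavedFilter repository (illustrative only; the real repository is SQL-backed):

package main

import (
	"context"
	"fmt"
)

type SavedFilter struct {
	ID     int
	Mode   string
	Filter string
}

// defaultFilterStore keeps at most one default filter per mode.
type defaultFilterStore map[string]*SavedFilter

func (s defaultFilterStore) setDefault(ctx context.Context, mode string, filter *string) {
	if filter == nil {
		delete(s, mode) // clearing: destroy the existing default, if any
		return
	}
	s[mode] = &SavedFilter{Mode: mode, Filter: *filter} // upsert
}

func main() {
	s := defaultFilterStore{}
	f := `{"sortby":"date"}`
	s.setDefault(context.Background(), "SCENES", &f)
	fmt.Println(s["SCENES"].Filter) // {"sortby":"date"}
	s.setDefault(context.Background(), "SCENES", nil)
	fmt.Println(s["SCENES"]) // <nil>
}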
updatedScene.Rating = translator.optionalInt(input.Rating, "rating") + updatedScene.StudioID, err = translator.optionalIntFromString(input.StudioID, "studio_id") + if err != nil { + return nil, fmt.Errorf("converting studio id: %w", err) } - updatedScene.Title = translator.nullString(input.Title, "title") - updatedScene.Details = translator.nullString(input.Details, "details") - updatedScene.URL = translator.nullString(input.URL, "url") - updatedScene.Date = translator.sqliteDate(input.Date, "date") - updatedScene.Rating = translator.nullInt64(input.Rating, "rating") - updatedScene.StudioID = translator.nullInt64FromString(input.StudioID, "studio_id") - updatedScene.Organized = input.Organized + updatedScene.Organized = translator.optionalBool(input.Organized, "organized") + + if translator.hasField("performer_ids") { + updatedScene.PerformerIDs, err = translateUpdateIDs(input.PerformerIds, models.RelationshipUpdateModeSet) + if err != nil { + return nil, fmt.Errorf("converting performer ids: %w", err) + } + } + + if translator.hasField("tag_ids") { + updatedScene.TagIDs, err = translateUpdateIDs(input.TagIds, models.RelationshipUpdateModeSet) + if err != nil { + return nil, fmt.Errorf("converting tag ids: %w", err) + } + } + + if translator.hasField("gallery_ids") { + updatedScene.GalleryIDs, err = translateUpdateIDs(input.GalleryIds, models.RelationshipUpdateModeSet) + if err != nil { + return nil, fmt.Errorf("converting gallery ids: %w", err) + } + } + + // Save the movies + if translator.hasField("movies") { + updatedScene.MovieIDs, err = models.UpdateMovieIDsFromInput(input.Movies) + if err != nil { + return nil, fmt.Errorf("converting movie ids: %w", err) + } + } + + // Save the stash_ids + if translator.hasField("stash_ids") { + updatedScene.StashIDs = &models.UpdateStashIDs{ + StashIDs: input.StashIds, + Mode: models.RelationshipUpdateModeSet, + } + } if input.CoverImage != nil && *input.CoverImage != "" { var err error @@ -122,51 +158,15 @@ func (r *mutationResolver) sceneUpdate(ctx context.Context, input models.SceneUp // update the cover after updating the scene } - qb := repo.Scene() - s, err := qb.Update(updatedScene) + qb := r.repository.Scene + s, err := qb.UpdatePartial(ctx, sceneID, updatedScene) if err != nil { return nil, err } // update cover table if len(coverImageData) > 0 { - if err := qb.UpdateCover(sceneID, coverImageData); err != nil { - return nil, err - } - } - - // Save the performers - if translator.hasField("performer_ids") { - if err := r.updateScenePerformers(qb, sceneID, input.PerformerIds); err != nil { - return nil, err - } - } - - // Save the movies - if translator.hasField("movies") { - if err := r.updateSceneMovies(qb, sceneID, input.Movies); err != nil { - return nil, err - } - } - - // Save the tags - if translator.hasField("tag_ids") { - if err := r.updateSceneTags(qb, sceneID, input.TagIds); err != nil { - return nil, err - } - } - - // Save the galleries - if translator.hasField("gallery_ids") { - if err := r.updateSceneGalleries(qb, sceneID, input.GalleryIds); err != nil { - return nil, err - } - } - - // Save the stash_ids - if translator.hasField("stash_ids") { - stashIDJoins := models.StashIDsFromInput(input.StashIds) - if err := qb.UpdateStashIDs(sceneID, stashIDJoins); err != nil { + if err := qb.UpdateCover(ctx, sceneID, coverImageData); err != nil { return nil, err } } @@ -182,144 +182,72 @@ func (r *mutationResolver) sceneUpdate(ctx context.Context, input models.SceneUp return s, nil } -func (r *mutationResolver) updateScenePerformers(qb 
models.SceneReaderWriter, sceneID int, performerIDs []string) error { - ids, err := stringslice.StringSliceToIntSlice(performerIDs) - if err != nil { - return err - } - return qb.UpdatePerformers(sceneID, ids) -} - -func (r *mutationResolver) updateSceneMovies(qb models.SceneReaderWriter, sceneID int, movies []*models.SceneMovieInput) error { - var movieJoins []models.MoviesScenes - - for _, movie := range movies { - movieID, err := strconv.Atoi(movie.MovieID) - if err != nil { - return err - } - - movieJoin := models.MoviesScenes{ - MovieID: movieID, - } - - if movie.SceneIndex != nil { - movieJoin.SceneIndex = sql.NullInt64{ - Int64: int64(*movie.SceneIndex), - Valid: true, - } - } - - movieJoins = append(movieJoins, movieJoin) - } - - return qb.UpdateMovies(sceneID, movieJoins) -} - -func (r *mutationResolver) updateSceneTags(qb models.SceneReaderWriter, sceneID int, tagsIDs []string) error { - ids, err := stringslice.StringSliceToIntSlice(tagsIDs) - if err != nil { - return err - } - return qb.UpdateTags(sceneID, ids) -} - -func (r *mutationResolver) updateSceneGalleries(qb models.SceneReaderWriter, sceneID int, galleryIDs []string) error { - ids, err := stringslice.StringSliceToIntSlice(galleryIDs) - if err != nil { - return err - } - return qb.UpdateGalleries(sceneID, ids) -} - -func (r *mutationResolver) BulkSceneUpdate(ctx context.Context, input models.BulkSceneUpdateInput) ([]*models.Scene, error) { +func (r *mutationResolver) BulkSceneUpdate(ctx context.Context, input BulkSceneUpdateInput) ([]*models.Scene, error) { sceneIDs, err := stringslice.StringSliceToIntSlice(input.Ids) if err != nil { return nil, err } // Populate scene from the input - updatedTime := time.Now() - translator := changesetTranslator{ inputMap: getUpdateInputMap(ctx), } - updatedScene := models.ScenePartial{ - UpdatedAt: &models.SQLiteTimestamp{Timestamp: updatedTime}, + updatedScene := models.NewScenePartial() + updatedScene.Title = translator.optionalString(input.Title, "title") + updatedScene.Details = translator.optionalString(input.Details, "details") + updatedScene.URL = translator.optionalString(input.URL, "url") + updatedScene.Date = translator.optionalDate(input.Date, "date") + updatedScene.Rating = translator.optionalInt(input.Rating, "rating") + updatedScene.StudioID, err = translator.optionalIntFromString(input.StudioID, "studio_id") + if err != nil { + return nil, fmt.Errorf("converting studio id: %w", err) } - updatedScene.Title = translator.nullString(input.Title, "title") - updatedScene.Details = translator.nullString(input.Details, "details") - updatedScene.URL = translator.nullString(input.URL, "url") - updatedScene.Date = translator.sqliteDate(input.Date, "date") - updatedScene.Rating = translator.nullInt64(input.Rating, "rating") - updatedScene.StudioID = translator.nullInt64FromString(input.StudioID, "studio_id") - updatedScene.Organized = input.Organized + updatedScene.Organized = translator.optionalBool(input.Organized, "organized") + + if translator.hasField("performer_ids") { + updatedScene.PerformerIDs, err = translateUpdateIDs(input.PerformerIds.Ids, input.PerformerIds.Mode) + if err != nil { + return nil, fmt.Errorf("converting performer ids: %w", err) + } + } + + if translator.hasField("tag_ids") { + updatedScene.TagIDs, err = translateUpdateIDs(input.TagIds.Ids, input.TagIds.Mode) + if err != nil { + return nil, fmt.Errorf("converting tag ids: %w", err) + } + } + + if translator.hasField("gallery_ids") { + updatedScene.GalleryIDs, err = translateUpdateIDs(input.GalleryIds.Ids, 
input.GalleryIds.Mode) + if err != nil { + return nil, fmt.Errorf("converting gallery ids: %w", err) + } + } + + // Save the movies + if translator.hasField("movies") { + updatedScene.MovieIDs, err = translateSceneMovieIDs(*input.MovieIds) + if err != nil { + return nil, fmt.Errorf("converting movie ids: %w", err) + } + } ret := []*models.Scene{} // Start the transaction and save the scene marker - if err := r.withTxn(ctx, func(repo models.Repository) error { - qb := repo.Scene() + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Scene for _, sceneID := range sceneIDs { - updatedScene.ID = sceneID - - scene, err := qb.Update(updatedScene) + scene, err := qb.UpdatePartial(ctx, sceneID, updatedScene) if err != nil { return err } ret = append(ret, scene) - - // Save the performers - if translator.hasField("performer_ids") { - performerIDs, err := adjustScenePerformerIDs(qb, sceneID, *input.PerformerIds) - if err != nil { - return err - } - - if err := qb.UpdatePerformers(sceneID, performerIDs); err != nil { - return err - } - } - - // Save the tags - if translator.hasField("tag_ids") { - tagIDs, err := adjustTagIDs(qb, sceneID, *input.TagIds) - if err != nil { - return err - } - - if err := qb.UpdateTags(sceneID, tagIDs); err != nil { - return err - } - } - - // Save the galleries - if translator.hasField("gallery_ids") { - galleryIDs, err := adjustSceneGalleryIDs(qb, sceneID, *input.GalleryIds) - if err != nil { - return err - } - - if err := qb.UpdateGalleries(sceneID, galleryIDs); err != nil { - return err - } - } - - // Save the movies - if translator.hasField("movie_ids") { - movies, err := adjustSceneMovieIDs(qb, sceneID, *input.MovieIds) - if err != nil { - return err - } - - if err := qb.UpdateMovies(sceneID, movies); err != nil { - return err - } - } } return nil @@ -343,9 +271,9 @@ func (r *mutationResolver) BulkSceneUpdate(ctx context.Context, input models.Bul return newRet, nil } -func adjustIDs(existingIDs []int, updateIDs models.BulkUpdateIds) []int { +func adjustIDs(existingIDs []int, updateIDs BulkUpdateIds) []int { // if we are setting the ids, just return the ids - if updateIDs.Mode == models.BulkUpdateIDModeSet { + if updateIDs.Mode == models.RelationshipUpdateModeSet { existingIDs = []int{} for _, idStr := range updateIDs.Ids { id, _ := strconv.Atoi(idStr) @@ -362,7 +290,7 @@ func adjustIDs(existingIDs []int, updateIDs models.BulkUpdateIds) []int { foundExisting := false for idx, existingID := range existingIDs { if existingID == id { - if updateIDs.Mode == models.BulkUpdateIDModeRemove { + if updateIDs.Mode == models.RelationshipUpdateModeRemove { // remove from the list existingIDs = append(existingIDs[:idx], existingIDs[idx+1:]...) 
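The ScenePartial construction above replaces the old nullString/nullInt64 translator calls with optional fields: a column is written only when the corresponding key was actually present in the GraphQL input map. A hypothetical reduction of that pattern (stash's models package defines its own optional types; the shape shown here is assumed for illustration only):

// OptionalString distinguishes "not provided" (Set == false) from an
// explicit null (Set && Null) and from a concrete value.
type OptionalString struct {
	Value string
	Null  bool
	Set   bool
}

func optionalString(v *string, field string, inputMap map[string]interface{}) OptionalString {
	if _, found := inputMap[field]; !found {
		return OptionalString{} // field absent: leave the column untouched
	}
	if v == nil {
		return OptionalString{Null: true, Set: true} // explicit null clears it
	}
	return OptionalString{Value: *v, Set: true}
}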
} @@ -372,7 +300,7 @@ func adjustIDs(existingIDs []int, updateIDs models.BulkUpdateIds) []int { } } - if !foundExisting && updateIDs.Mode != models.BulkUpdateIDModeRemove { + if !foundExisting && updateIDs.Mode != models.RelationshipUpdateModeRemove { existingIDs = append(existingIDs, id) } } @@ -380,21 +308,12 @@ func adjustIDs(existingIDs []int, updateIDs models.BulkUpdateIds) []int { return existingIDs } -func adjustScenePerformerIDs(qb models.SceneReader, sceneID int, ids models.BulkUpdateIds) (ret []int, err error) { - ret, err = qb.GetPerformerIDs(sceneID) - if err != nil { - return nil, err - } - - return adjustIDs(ret, ids), nil -} - type tagIDsGetter interface { - GetTagIDs(id int) ([]int, error) + GetTagIDs(ctx context.Context, id int) ([]int, error) } -func adjustTagIDs(qb tagIDsGetter, sceneID int, ids models.BulkUpdateIds) (ret []int, err error) { - ret, err = qb.GetTagIDs(sceneID) +func adjustTagIDs(ctx context.Context, qb tagIDsGetter, sceneID int, ids BulkUpdateIds) (ret []int, err error) { + ret, err = qb.GetTagIDs(ctx, sceneID) if err != nil { return nil, err } @@ -402,57 +321,6 @@ func adjustTagIDs(qb tagIDsGetter, sceneID int, ids models.BulkUpdateIds) (ret [ return adjustIDs(ret, ids), nil } -func adjustSceneGalleryIDs(qb models.SceneReader, sceneID int, ids models.BulkUpdateIds) (ret []int, err error) { - ret, err = qb.GetGalleryIDs(sceneID) - if err != nil { - return nil, err - } - - return adjustIDs(ret, ids), nil -} - -func adjustSceneMovieIDs(qb models.SceneReader, sceneID int, updateIDs models.BulkUpdateIds) ([]models.MoviesScenes, error) { - existingMovies, err := qb.GetMovies(sceneID) - if err != nil { - return nil, err - } - - // if we are setting the ids, just return the ids - if updateIDs.Mode == models.BulkUpdateIDModeSet { - existingMovies = []models.MoviesScenes{} - for _, idStr := range updateIDs.Ids { - id, _ := strconv.Atoi(idStr) - existingMovies = append(existingMovies, models.MoviesScenes{MovieID: id}) - } - - return existingMovies, nil - } - - for _, idStr := range updateIDs.Ids { - id, _ := strconv.Atoi(idStr) - - // look for the id in the list - foundExisting := false - for idx, existingMovie := range existingMovies { - if existingMovie.MovieID == id { - if updateIDs.Mode == models.BulkUpdateIDModeRemove { - // remove from the list - existingMovies = append(existingMovies[:idx], existingMovies[idx+1:]...) 
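adjustIDs is the generic survivor of the per-relationship helpers deleted here; it applies one of three bulk-edit modes to an existing id list. A quick usage sketch of the expected semantics, assuming an Add mode alongside the Set and Remove modes visible above (results shown as comments):

// Set replaces the list wholesale with the parsed input ids.
_ = adjustIDs([]int{1, 2, 3}, BulkUpdateIds{Ids: []string{"9"}, Mode: models.RelationshipUpdateModeSet}) // [9]

// Add appends only ids not already present.
_ = adjustIDs([]int{1, 2, 3}, BulkUpdateIds{Ids: []string{"2", "4"}, Mode: models.RelationshipUpdateModeAdd}) // [1 2 3 4]

// Remove drops matching ids and keeps the rest.
_ = adjustIDs([]int{1, 2, 3}, BulkUpdateIds{Ids: []string{"2"}, Mode: models.RelationshipUpdateModeRemove}) // [1 3]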
- } - - foundExisting = true - break - } - } - - if !foundExisting && updateIDs.Mode != models.BulkUpdateIDModeRemove { - existingMovies = append(existingMovies, models.MoviesScenes{MovieID: id}) - } - } - - return existingMovies, err -} - func (r *mutationResolver) SceneDestroy(ctx context.Context, input models.SceneDestroyInput) (bool, error) { sceneID, err := strconv.Atoi(input.ID) if err != nil { @@ -463,7 +331,7 @@ func (r *mutationResolver) SceneDestroy(ctx context.Context, input models.SceneD var s *models.Scene fileDeleter := &scene.FileDeleter{ - Deleter: *file.NewDeleter(), + Deleter: file.NewDeleter(), FileNamingAlgo: fileNamingAlgo, Paths: manager.GetInstance().Paths, } @@ -471,10 +339,10 @@ func (r *mutationResolver) SceneDestroy(ctx context.Context, input models.SceneD deleteGenerated := utils.IsTrue(input.DeleteGenerated) deleteFile := utils.IsTrue(input.DeleteFile) - if err := r.withTxn(ctx, func(repo models.Repository) error { - qb := repo.Scene() + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Scene var err error - s, err = qb.Find(sceneID) + s, err = qb.Find(ctx, sceneID) if err != nil { return err } @@ -486,7 +354,7 @@ func (r *mutationResolver) SceneDestroy(ctx context.Context, input models.SceneD // kill any running encoders manager.KillRunningStreams(s, fileNamingAlgo) - return scene.Destroy(s, repo, fileDeleter, deleteGenerated, deleteFile) + return r.sceneService.Destroy(ctx, s, fileDeleter, deleteGenerated, deleteFile) }); err != nil { fileDeleter.Rollback() return false, err @@ -498,8 +366,8 @@ func (r *mutationResolver) SceneDestroy(ctx context.Context, input models.SceneD // call post hook after performing the other actions r.hookExecutor.ExecutePostHooks(ctx, s.ID, plugin.SceneDestroyPost, plugin.SceneDestroyInput{ SceneDestroyInput: input, - Checksum: s.Checksum.String, - OSHash: s.OSHash.String, + Checksum: s.Checksum, + OSHash: s.OSHash, Path: s.Path, }, nil) @@ -511,7 +379,7 @@ func (r *mutationResolver) ScenesDestroy(ctx context.Context, input models.Scene fileNamingAlgo := manager.GetInstance().Config.GetVideoFileNamingAlgorithm() fileDeleter := &scene.FileDeleter{ - Deleter: *file.NewDeleter(), + Deleter: file.NewDeleter(), FileNamingAlgo: fileNamingAlgo, Paths: manager.GetInstance().Paths, } @@ -519,13 +387,13 @@ func (r *mutationResolver) ScenesDestroy(ctx context.Context, input models.Scene deleteGenerated := utils.IsTrue(input.DeleteGenerated) deleteFile := utils.IsTrue(input.DeleteFile) - if err := r.withTxn(ctx, func(repo models.Repository) error { - qb := repo.Scene() + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Scene for _, id := range input.Ids { sceneID, _ := strconv.Atoi(id) - s, err := qb.Find(sceneID) + s, err := qb.Find(ctx, sceneID) if err != nil { return err } @@ -536,7 +404,7 @@ func (r *mutationResolver) ScenesDestroy(ctx context.Context, input models.Scene // kill any running encoders manager.KillRunningStreams(s, fileNamingAlgo) - if err := scene.Destroy(s, repo, fileDeleter, deleteGenerated, deleteFile); err != nil { + if err := r.sceneService.Destroy(ctx, s, fileDeleter, deleteGenerated, deleteFile); err != nil { return err } } @@ -554,8 +422,8 @@ func (r *mutationResolver) ScenesDestroy(ctx context.Context, input models.Scene // call post hook after performing the other actions r.hookExecutor.ExecutePostHooks(ctx, scene.ID, plugin.SceneDestroyPost, plugin.ScenesDestroyInput{ ScenesDestroyInput: input, - Checksum: scene.Checksum.String, - OSHash: 
scene.OSHash.String, + Checksum: scene.Checksum, + OSHash: scene.OSHash, Path: scene.Path, }, nil) } @@ -564,8 +432,8 @@ func (r *mutationResolver) ScenesDestroy(ctx context.Context, input models.Scene } func (r *mutationResolver) getSceneMarker(ctx context.Context, id int) (ret *models.SceneMarker, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.SceneMarker().Find(id) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.SceneMarker.Find(ctx, id) return err }); err != nil { return nil, err @@ -574,7 +442,7 @@ func (r *mutationResolver) getSceneMarker(ctx context.Context, id int) (ret *mod return ret, nil } -func (r *mutationResolver) SceneMarkerCreate(ctx context.Context, input models.SceneMarkerCreateInput) (*models.SceneMarker, error) { +func (r *mutationResolver) SceneMarkerCreate(ctx context.Context, input SceneMarkerCreateInput) (*models.SceneMarker, error) { primaryTagID, err := strconv.Atoi(input.PrimaryTagID) if err != nil { return nil, err @@ -609,7 +477,7 @@ func (r *mutationResolver) SceneMarkerCreate(ctx context.Context, input models.S return r.getSceneMarker(ctx, ret.ID) } -func (r *mutationResolver) SceneMarkerUpdate(ctx context.Context, input models.SceneMarkerUpdateInput) (*models.SceneMarker, error) { +func (r *mutationResolver) SceneMarkerUpdate(ctx context.Context, input SceneMarkerUpdateInput) (*models.SceneMarker, error) { // Populate scene marker from the input sceneMarkerID, err := strconv.Atoi(input.ID) if err != nil { @@ -661,16 +529,16 @@ func (r *mutationResolver) SceneMarkerDestroy(ctx context.Context, id string) (b fileNamingAlgo := manager.GetInstance().Config.GetVideoFileNamingAlgorithm() fileDeleter := &scene.FileDeleter{ - Deleter: *file.NewDeleter(), + Deleter: file.NewDeleter(), FileNamingAlgo: fileNamingAlgo, Paths: manager.GetInstance().Paths, } - if err := r.withTxn(ctx, func(repo models.Repository) error { - qb := repo.SceneMarker() - sqb := repo.Scene() + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.SceneMarker + sqb := r.repository.Scene - marker, err := qb.Find(markerID) + marker, err := qb.Find(ctx, markerID) if err != nil { return err @@ -680,12 +548,12 @@ func (r *mutationResolver) SceneMarkerDestroy(ctx context.Context, id string) (b return fmt.Errorf("scene marker with id %d not found", markerID) } - s, err := sqb.Find(int(marker.SceneID.Int64)) + s, err := sqb.Find(ctx, int(marker.SceneID.Int64)) if err != nil { return err } - return scene.DestroyMarker(s, marker, qb, fileDeleter) + return scene.DestroyMarker(ctx, s, marker, qb, fileDeleter) }); err != nil { fileDeleter.Rollback() return false, err @@ -707,32 +575,32 @@ func (r *mutationResolver) changeMarker(ctx context.Context, changeType int, cha fileNamingAlgo := manager.GetInstance().Config.GetVideoFileNamingAlgorithm() fileDeleter := &scene.FileDeleter{ - Deleter: *file.NewDeleter(), + Deleter: file.NewDeleter(), FileNamingAlgo: fileNamingAlgo, Paths: manager.GetInstance().Paths, } // Start the transaction and save the scene marker - if err := r.withTxn(ctx, func(repo models.Repository) error { - qb := repo.SceneMarker() - sqb := repo.Scene() + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.SceneMarker + sqb := r.repository.Scene var err error switch changeType { case create: - sceneMarker, err = qb.Create(changedMarker) + sceneMarker, err = qb.Create(ctx, changedMarker) case update: // check to see if timestamp was changed - 
existingMarker, err = qb.Find(changedMarker.ID) + existingMarker, err = qb.Find(ctx, changedMarker.ID) if err != nil { return err } - sceneMarker, err = qb.Update(changedMarker) + sceneMarker, err = qb.Update(ctx, changedMarker) if err != nil { return err } - s, err = sqb.Find(int(existingMarker.SceneID.Int64)) + s, err = sqb.Find(ctx, int(existingMarker.SceneID.Int64)) } if err != nil { return err @@ -749,7 +617,7 @@ func (r *mutationResolver) changeMarker(ctx context.Context, changeType int, cha // Save the marker tags // If this tag is the primary tag, then let's not add it. tagIDs = intslice.IntExclude(tagIDs, []int{changedMarker.PrimaryTagID}) - return qb.UpdateTags(sceneMarker.ID, tagIDs) + return qb.UpdateTags(ctx, sceneMarker.ID, tagIDs) }); err != nil { fileDeleter.Rollback() return nil, err @@ -766,10 +634,10 @@ func (r *mutationResolver) SceneIncrementO(ctx context.Context, id string) (ret return 0, err } - if err := r.withTxn(ctx, func(repo models.Repository) error { - qb := repo.Scene() + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Scene - ret, err = qb.IncrementOCounter(sceneID) + ret, err = qb.IncrementOCounter(ctx, sceneID) return err }); err != nil { return 0, err @@ -784,10 +652,10 @@ func (r *mutationResolver) SceneDecrementO(ctx context.Context, id string) (ret return 0, err } - if err := r.withTxn(ctx, func(repo models.Repository) error { - qb := repo.Scene() + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Scene - ret, err = qb.DecrementOCounter(sceneID) + ret, err = qb.DecrementOCounter(ctx, sceneID) return err }); err != nil { return 0, err @@ -802,10 +670,10 @@ func (r *mutationResolver) SceneResetO(ctx context.Context, id string) (ret int, return 0, err } - if err := r.withTxn(ctx, func(repo models.Repository) error { - qb := repo.Scene() + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Scene - ret, err = qb.ResetOCounter(sceneID) + ret, err = qb.ResetOCounter(ctx, sceneID) return err }); err != nil { return 0, err diff --git a/internal/api/resolver_mutation_stash_box.go b/internal/api/resolver_mutation_stash_box.go index 2b6d259ad..22cc1799e 100644 --- a/internal/api/resolver_mutation_stash_box.go +++ b/internal/api/resolver_mutation_stash_box.go @@ -7,35 +7,43 @@ import ( "github.com/stashapp/stash/internal/manager" "github.com/stashapp/stash/internal/manager/config" - "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/scraper/stashbox" ) -func (r *mutationResolver) SubmitStashBoxFingerprints(ctx context.Context, input models.StashBoxFingerprintSubmissionInput) (bool, error) { +func (r *Resolver) stashboxRepository() stashbox.Repository { + return stashbox.Repository{ + Scene: r.repository.Scene, + Performer: r.repository.Performer, + Tag: r.repository.Tag, + Studio: r.repository.Studio, + } +} + +func (r *mutationResolver) SubmitStashBoxFingerprints(ctx context.Context, input StashBoxFingerprintSubmissionInput) (bool, error) { boxes := config.GetInstance().GetStashBoxes() if input.StashBoxIndex < 0 || input.StashBoxIndex >= len(boxes) { return false, fmt.Errorf("invalid stash_box_index %d", input.StashBoxIndex) } - client := stashbox.NewClient(*boxes[input.StashBoxIndex], r.txnManager) + client := stashbox.NewClient(*boxes[input.StashBoxIndex], r.txnManager, r.stashboxRepository()) return client.SubmitStashBoxFingerprints(ctx, input.SceneIds, boxes[input.StashBoxIndex].Endpoint) } -func (r *mutationResolver) StashBoxBatchPerformerTag(ctx 
context.Context, input models.StashBoxBatchPerformerTagInput) (string, error) { +func (r *mutationResolver) StashBoxBatchPerformerTag(ctx context.Context, input manager.StashBoxBatchPerformerTagInput) (string, error) { jobID := manager.GetInstance().StashBoxBatchPerformerTag(ctx, input) return strconv.Itoa(jobID), nil } -func (r *mutationResolver) SubmitStashBoxSceneDraft(ctx context.Context, input models.StashBoxDraftSubmissionInput) (*string, error) { +func (r *mutationResolver) SubmitStashBoxSceneDraft(ctx context.Context, input StashBoxDraftSubmissionInput) (*string, error) { boxes := config.GetInstance().GetStashBoxes() if input.StashBoxIndex < 0 || input.StashBoxIndex >= len(boxes) { return nil, fmt.Errorf("invalid stash_box_index %d", input.StashBoxIndex) } - client := stashbox.NewClient(*boxes[input.StashBoxIndex], r.txnManager) + client := stashbox.NewClient(*boxes[input.StashBoxIndex], r.txnManager, r.stashboxRepository()) id, err := strconv.Atoi(input.ID) if err != nil { @@ -43,29 +51,30 @@ func (r *mutationResolver) SubmitStashBoxSceneDraft(ctx context.Context, input m } var res *string - err = r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - qb := repo.Scene() - scene, err := qb.Find(id) + err = r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Scene + scene, err := qb.Find(ctx, id) if err != nil { return err } + filepath := manager.GetInstance().Paths.Scene.GetScreenshotPath(scene.GetHash(config.GetInstance().GetVideoFileNamingAlgorithm())) - res, err = client.SubmitSceneDraft(ctx, id, boxes[input.StashBoxIndex].Endpoint, filepath) + res, err = client.SubmitSceneDraft(ctx, scene, boxes[input.StashBoxIndex].Endpoint, filepath) return err }) return res, err } -func (r *mutationResolver) SubmitStashBoxPerformerDraft(ctx context.Context, input models.StashBoxDraftSubmissionInput) (*string, error) { +func (r *mutationResolver) SubmitStashBoxPerformerDraft(ctx context.Context, input StashBoxDraftSubmissionInput) (*string, error) { boxes := config.GetInstance().GetStashBoxes() if input.StashBoxIndex < 0 || input.StashBoxIndex >= len(boxes) { return nil, fmt.Errorf("invalid stash_box_index %d", input.StashBoxIndex) } - client := stashbox.NewClient(*boxes[input.StashBoxIndex], r.txnManager) + client := stashbox.NewClient(*boxes[input.StashBoxIndex], r.txnManager, r.stashboxRepository()) id, err := strconv.Atoi(input.ID) if err != nil { @@ -73,9 +82,9 @@ func (r *mutationResolver) SubmitStashBoxPerformerDraft(ctx context.Context, inp } var res *string - err = r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - qb := repo.Performer() - performer, err := qb.Find(id) + err = r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Performer + performer, err := qb.Find(ctx, id) if err != nil { return err } diff --git a/internal/api/resolver_mutation_studio.go b/internal/api/resolver_mutation_studio.go index 23f2a9bdc..e9ee8965b 100644 --- a/internal/api/resolver_mutation_studio.go +++ b/internal/api/resolver_mutation_studio.go @@ -17,8 +17,8 @@ import ( ) func (r *mutationResolver) getStudio(ctx context.Context, id int) (ret *models.Studio, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Studio().Find(id) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Studio.Find(ctx, id) return err }); err != nil { return nil, err @@ -27,7 +27,7 @@ func (r *mutationResolver) getStudio(ctx context.Context, id int) (ret *models.S return ret, nil } -func 
(r *mutationResolver) StudioCreate(ctx context.Context, input models.StudioCreateInput) (*models.Studio, error) { +func (r *mutationResolver) StudioCreate(ctx context.Context, input StudioCreateInput) (*models.Studio, error) { // generate checksum from studio name rather than image checksum := md5.FromString(input.Name) @@ -72,36 +72,36 @@ func (r *mutationResolver) StudioCreate(ctx context.Context, input models.Studio // Start the transaction and save the studio var s *models.Studio - if err := r.withTxn(ctx, func(repo models.Repository) error { - qb := repo.Studio() + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Studio var err error - s, err = qb.Create(newStudio) + s, err = qb.Create(ctx, newStudio) if err != nil { return err } // update image table if len(imageData) > 0 { - if err := qb.UpdateImage(s.ID, imageData); err != nil { + if err := qb.UpdateImage(ctx, s.ID, imageData); err != nil { return err } } // Save the stash_ids if input.StashIds != nil { - stashIDJoins := models.StashIDsFromInput(input.StashIds) - if err := qb.UpdateStashIDs(s.ID, stashIDJoins); err != nil { + stashIDJoins := stashIDPtrSliceToSlice(input.StashIds) + if err := qb.UpdateStashIDs(ctx, s.ID, stashIDJoins); err != nil { return err } } if len(input.Aliases) > 0 { - if err := studio.EnsureAliasesUnique(s.ID, input.Aliases, qb); err != nil { + if err := studio.EnsureAliasesUnique(ctx, s.ID, input.Aliases, qb); err != nil { return err } - if err := qb.UpdateAliases(s.ID, input.Aliases); err != nil { + if err := qb.UpdateAliases(ctx, s.ID, input.Aliases); err != nil { return err } } @@ -115,7 +115,7 @@ func (r *mutationResolver) StudioCreate(ctx context.Context, input models.Studio return r.getStudio(ctx, s.ID) } -func (r *mutationResolver) StudioUpdate(ctx context.Context, input models.StudioUpdateInput) (*models.Studio, error) { +func (r *mutationResolver) StudioUpdate(ctx context.Context, input StudioUpdateInput) (*models.Studio, error) { // Populate studio from the input studioID, err := strconv.Atoi(input.ID) if err != nil { @@ -155,45 +155,45 @@ func (r *mutationResolver) StudioUpdate(ctx context.Context, input models.Studio // Start the transaction and save the studio var s *models.Studio - if err := r.withTxn(ctx, func(repo models.Repository) error { - qb := repo.Studio() + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Studio - if err := manager.ValidateModifyStudio(updatedStudio, qb); err != nil { + if err := manager.ValidateModifyStudio(ctx, updatedStudio, qb); err != nil { return err } var err error - s, err = qb.Update(updatedStudio) + s, err = qb.Update(ctx, updatedStudio) if err != nil { return err } // update image table if len(imageData) > 0 { - if err := qb.UpdateImage(s.ID, imageData); err != nil { + if err := qb.UpdateImage(ctx, s.ID, imageData); err != nil { return err } } else if imageIncluded { // must be unsetting - if err := qb.DestroyImage(s.ID); err != nil { + if err := qb.DestroyImage(ctx, s.ID); err != nil { return err } } // Save the stash_ids if translator.hasField("stash_ids") { - stashIDJoins := models.StashIDsFromInput(input.StashIds) - if err := qb.UpdateStashIDs(studioID, stashIDJoins); err != nil { + stashIDJoins := stashIDPtrSliceToSlice(input.StashIds) + if err := qb.UpdateStashIDs(ctx, studioID, stashIDJoins); err != nil { return err } } if translator.hasField("aliases") { - if err := studio.EnsureAliasesUnique(studioID, input.Aliases, qb); err != nil { + if err := studio.EnsureAliasesUnique(ctx, studioID, 
input.Aliases, qb); err != nil { return err } - if err := qb.UpdateAliases(studioID, input.Aliases); err != nil { + if err := qb.UpdateAliases(ctx, studioID, input.Aliases); err != nil { return err } } @@ -207,14 +207,14 @@ func (r *mutationResolver) StudioUpdate(ctx context.Context, input models.Studio return r.getStudio(ctx, s.ID) } -func (r *mutationResolver) StudioDestroy(ctx context.Context, input models.StudioDestroyInput) (bool, error) { +func (r *mutationResolver) StudioDestroy(ctx context.Context, input StudioDestroyInput) (bool, error) { id, err := strconv.Atoi(input.ID) if err != nil { return false, err } - if err := r.withTxn(ctx, func(repo models.Repository) error { - return repo.Studio().Destroy(id) + if err := r.withTxn(ctx, func(ctx context.Context) error { + return r.repository.Studio.Destroy(ctx, id) }); err != nil { return false, err } @@ -230,10 +230,10 @@ func (r *mutationResolver) StudiosDestroy(ctx context.Context, studioIDs []strin return false, err } - if err := r.withTxn(ctx, func(repo models.Repository) error { - qb := repo.Studio() + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Studio for _, id := range ids { - if err := qb.Destroy(id); err != nil { + if err := qb.Destroy(ctx, id); err != nil { return err } } diff --git a/internal/api/resolver_mutation_tag.go b/internal/api/resolver_mutation_tag.go index 680479b8d..f5befeba7 100644 --- a/internal/api/resolver_mutation_tag.go +++ b/internal/api/resolver_mutation_tag.go @@ -15,8 +15,8 @@ import ( ) func (r *mutationResolver) getTag(ctx context.Context, id int) (ret *models.Tag, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Tag().Find(id) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Tag.Find(ctx, id) return err }); err != nil { return nil, err @@ -25,7 +25,7 @@ func (r *mutationResolver) getTag(ctx context.Context, id int) (ret *models.Tag, return ret, nil } -func (r *mutationResolver) TagCreate(ctx context.Context, input models.TagCreateInput) (*models.Tag, error) { +func (r *mutationResolver) TagCreate(ctx context.Context, input TagCreateInput) (*models.Tag, error) { // Populate a new tag from the input currentTime := time.Now() newTag := models.Tag{ @@ -68,44 +68,44 @@ func (r *mutationResolver) TagCreate(ctx context.Context, input models.TagCreate // Start the transaction and save the tag var t *models.Tag - if err := r.withTxn(ctx, func(repo models.Repository) error { - qb := repo.Tag() + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Tag // ensure name is unique - if err := tag.EnsureTagNameUnique(0, newTag.Name, qb); err != nil { + if err := tag.EnsureTagNameUnique(ctx, 0, newTag.Name, qb); err != nil { return err } - t, err = qb.Create(newTag) + t, err = qb.Create(ctx, newTag) if err != nil { return err } // update image table if len(imageData) > 0 { - if err := qb.UpdateImage(t.ID, imageData); err != nil { + if err := qb.UpdateImage(ctx, t.ID, imageData); err != nil { return err } } if len(input.Aliases) > 0 { - if err := tag.EnsureAliasesUnique(t.ID, input.Aliases, qb); err != nil { + if err := tag.EnsureAliasesUnique(ctx, t.ID, input.Aliases, qb); err != nil { return err } - if err := qb.UpdateAliases(t.ID, input.Aliases); err != nil { + if err := qb.UpdateAliases(ctx, t.ID, input.Aliases); err != nil { return err } } if len(parentIDs) > 0 { - if err := qb.UpdateParentTags(t.ID, parentIDs); err != nil { + if err := 
qb.UpdateParentTags(ctx, t.ID, parentIDs); err != nil { return err } } if len(childIDs) > 0 { - if err := qb.UpdateChildTags(t.ID, childIDs); err != nil { + if err := qb.UpdateChildTags(ctx, t.ID, childIDs); err != nil { return err } } @@ -113,7 +113,7 @@ func (r *mutationResolver) TagCreate(ctx context.Context, input models.TagCreate // FIXME: This should be called before any changes are made, but // requires a rewrite of ValidateHierarchy. if len(parentIDs) > 0 || len(childIDs) > 0 { - if err := tag.ValidateHierarchy(t, parentIDs, childIDs, qb); err != nil { + if err := tag.ValidateHierarchy(ctx, t, parentIDs, childIDs, qb); err != nil { return err } } @@ -127,7 +127,7 @@ func (r *mutationResolver) TagCreate(ctx context.Context, input models.TagCreate return r.getTag(ctx, t.ID) } -func (r *mutationResolver) TagUpdate(ctx context.Context, input models.TagUpdateInput) (*models.Tag, error) { +func (r *mutationResolver) TagUpdate(ctx context.Context, input TagUpdateInput) (*models.Tag, error) { // Populate tag from the input tagID, err := strconv.Atoi(input.ID) if err != nil { @@ -168,11 +168,11 @@ func (r *mutationResolver) TagUpdate(ctx context.Context, input models.TagUpdate // Start the transaction and save the tag var t *models.Tag - if err := r.withTxn(ctx, func(repo models.Repository) error { - qb := repo.Tag() + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Tag // ensure name is unique - t, err = qb.Find(tagID) + t, err = qb.Find(ctx, tagID) if err != nil { return err } @@ -188,48 +188,48 @@ func (r *mutationResolver) TagUpdate(ctx context.Context, input models.TagUpdate } if input.Name != nil && t.Name != *input.Name { - if err := tag.EnsureTagNameUnique(tagID, *input.Name, qb); err != nil { + if err := tag.EnsureTagNameUnique(ctx, tagID, *input.Name, qb); err != nil { return err } updatedTag.Name = input.Name } - t, err = qb.Update(updatedTag) + t, err = qb.Update(ctx, updatedTag) if err != nil { return err } // update image table if len(imageData) > 0 { - if err := qb.UpdateImage(tagID, imageData); err != nil { + if err := qb.UpdateImage(ctx, tagID, imageData); err != nil { return err } } else if imageIncluded { // must be unsetting - if err := qb.DestroyImage(tagID); err != nil { + if err := qb.DestroyImage(ctx, tagID); err != nil { return err } } if translator.hasField("aliases") { - if err := tag.EnsureAliasesUnique(tagID, input.Aliases, qb); err != nil { + if err := tag.EnsureAliasesUnique(ctx, tagID, input.Aliases, qb); err != nil { return err } - if err := qb.UpdateAliases(tagID, input.Aliases); err != nil { + if err := qb.UpdateAliases(ctx, tagID, input.Aliases); err != nil { return err } } if parentIDs != nil { - if err := qb.UpdateParentTags(tagID, parentIDs); err != nil { + if err := qb.UpdateParentTags(ctx, tagID, parentIDs); err != nil { return err } } if childIDs != nil { - if err := qb.UpdateChildTags(tagID, childIDs); err != nil { + if err := qb.UpdateChildTags(ctx, tagID, childIDs); err != nil { return err } } @@ -237,7 +237,7 @@ func (r *mutationResolver) TagUpdate(ctx context.Context, input models.TagUpdate // FIXME: This should be called before any changes are made, but // requires a rewrite of ValidateHierarchy. 
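The tag helpers (tag.EnsureTagNameUnique, tag.EnsureAliasesUnique, tag.ValidateHierarchy) now take ctx plus a store argument, following the same recipe as the tagIDsGetter interface earlier in this diff: a small consumer-defined interface with ctx-first methods keeps the business rule decoupled from the concrete sqlite store and trivially mockable. A sketch of that shape (tagNameFinder and FindByName are illustrative, not the actual pkg/tag API):

import (
	"context"
	"fmt"

	"github.com/stashapp/stash/pkg/models"
)

type tagNameFinder interface {
	FindByName(ctx context.Context, name string) (*models.Tag, error)
}

// ensureNameUnique fails if a different tag already owns the name.
func ensureNameUnique(ctx context.Context, id int, name string, qb tagNameFinder) error {
	existing, err := qb.FindByName(ctx, name)
	if err != nil {
		return err
	}
	if existing != nil && existing.ID != id {
		return fmt.Errorf("tag name %q is already used by tag %d", name, existing.ID)
	}
	return nil
}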
if parentIDs != nil || childIDs != nil { - if err := tag.ValidateHierarchy(t, parentIDs, childIDs, qb); err != nil { + if err := tag.ValidateHierarchy(ctx, t, parentIDs, childIDs, qb); err != nil { logger.Errorf("Error saving tag: %s", err) return err } @@ -252,14 +252,14 @@ func (r *mutationResolver) TagUpdate(ctx context.Context, input models.TagUpdate return r.getTag(ctx, t.ID) } -func (r *mutationResolver) TagDestroy(ctx context.Context, input models.TagDestroyInput) (bool, error) { +func (r *mutationResolver) TagDestroy(ctx context.Context, input TagDestroyInput) (bool, error) { tagID, err := strconv.Atoi(input.ID) if err != nil { return false, err } - if err := r.withTxn(ctx, func(repo models.Repository) error { - return repo.Tag().Destroy(tagID) + if err := r.withTxn(ctx, func(ctx context.Context) error { + return r.repository.Tag.Destroy(ctx, tagID) }); err != nil { return false, err } @@ -275,10 +275,10 @@ func (r *mutationResolver) TagsDestroy(ctx context.Context, tagIDs []string) (bo return false, err } - if err := r.withTxn(ctx, func(repo models.Repository) error { - qb := repo.Tag() + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Tag for _, id := range ids { - if err := qb.Destroy(id); err != nil { + if err := qb.Destroy(ctx, id); err != nil { return err } } @@ -295,7 +295,7 @@ func (r *mutationResolver) TagsDestroy(ctx context.Context, tagIDs []string) (bo return true, nil } -func (r *mutationResolver) TagsMerge(ctx context.Context, input models.TagsMergeInput) (*models.Tag, error) { +func (r *mutationResolver) TagsMerge(ctx context.Context, input TagsMergeInput) (*models.Tag, error) { source, err := stringslice.StringSliceToIntSlice(input.Source) if err != nil { return nil, err @@ -311,11 +311,11 @@ func (r *mutationResolver) TagsMerge(ctx context.Context, input models.TagsMerge } var t *models.Tag - if err := r.withTxn(ctx, func(repo models.Repository) error { - qb := repo.Tag() + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Tag var err error - t, err = qb.Find(destination) + t, err = qb.Find(ctx, destination) if err != nil { return err } @@ -324,25 +324,25 @@ func (r *mutationResolver) TagsMerge(ctx context.Context, input models.TagsMerge return fmt.Errorf("Tag with ID %d not found", destination) } - parents, children, err := tag.MergeHierarchy(destination, source, qb) + parents, children, err := tag.MergeHierarchy(ctx, destination, source, qb) if err != nil { return err } - if err = qb.Merge(source, destination); err != nil { + if err = qb.Merge(ctx, source, destination); err != nil { return err } - err = qb.UpdateParentTags(destination, parents) + err = qb.UpdateParentTags(ctx, destination, parents) if err != nil { return err } - err = qb.UpdateChildTags(destination, children) + err = qb.UpdateChildTags(ctx, destination, children) if err != nil { return err } - err = tag.ValidateHierarchy(t, parents, children, qb) + err = tag.ValidateHierarchy(ctx, t, parents, children, qb) if err != nil { logger.Errorf("Error merging tag: %s", err) return err diff --git a/internal/api/resolver_mutation_tag_test.go b/internal/api/resolver_mutation_tag_test.go index 9329f6b7d..bfd2781c3 100644 --- a/internal/api/resolver_mutation_tag_test.go +++ b/internal/api/resolver_mutation_tag_test.go @@ -5,6 +5,7 @@ import ( "errors" "testing" + "github.com/stashapp/stash/internal/manager" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models/mocks" "github.com/stashapp/stash/pkg/plugin" @@ -15,18 +16,28 @@ 
import ( // TODO - move this into a common area func newResolver() *Resolver { + txnMgr := &mocks.TxnManager{} return &Resolver{ - txnManager: mocks.NewTransactionManager(), + txnManager: txnMgr, + repository: manager.Repository{ + TxnManager: txnMgr, + Tag: &mocks.TagReaderWriter{}, + }, hookExecutor: &mockHookExecutor{}, } } -const tagName = "tagName" -const errTagName = "errTagName" +const ( + tagName = "tagName" + errTagName = "errTagName" -const existingTagID = 1 -const existingTagName = "existingTagName" -const newTagID = 2 + existingTagID = 1 + existingTagName = "existingTagName" + + newTagID = 2 +) + +var testCtx = context.Background() type mockHookExecutor struct{} @@ -36,7 +47,7 @@ func (*mockHookExecutor) ExecutePostHooks(ctx context.Context, id int, hookType func TestTagCreate(t *testing.T) { r := newResolver() - tagRW := r.txnManager.(*mocks.TransactionManager).Tag().(*mocks.TagReaderWriter) + tagRW := r.repository.Tag.(*mocks.TagReaderWriter) pp := 1 findFilter := &models.FindFilterType{ @@ -61,25 +72,25 @@ func TestTagCreate(t *testing.T) { } } - tagRW.On("Query", tagFilterForName(existingTagName), findFilter).Return([]*models.Tag{ + tagRW.On("Query", testCtx, tagFilterForName(existingTagName), findFilter).Return([]*models.Tag{ { ID: existingTagID, Name: existingTagName, }, }, 1, nil).Once() - tagRW.On("Query", tagFilterForName(errTagName), findFilter).Return(nil, 0, nil).Once() - tagRW.On("Query", tagFilterForAlias(errTagName), findFilter).Return(nil, 0, nil).Once() + tagRW.On("Query", testCtx, tagFilterForName(errTagName), findFilter).Return(nil, 0, nil).Once() + tagRW.On("Query", testCtx, tagFilterForAlias(errTagName), findFilter).Return(nil, 0, nil).Once() expectedErr := errors.New("TagCreate error") - tagRW.On("Create", mock.AnythingOfType("models.Tag")).Return(nil, expectedErr) + tagRW.On("Create", testCtx, mock.AnythingOfType("models.Tag")).Return(nil, expectedErr) - _, err := r.Mutation().TagCreate(context.TODO(), models.TagCreateInput{ + _, err := r.Mutation().TagCreate(testCtx, TagCreateInput{ Name: existingTagName, }) assert.NotNil(t, err) - _, err = r.Mutation().TagCreate(context.TODO(), models.TagCreateInput{ + _, err = r.Mutation().TagCreate(testCtx, TagCreateInput{ Name: errTagName, }) @@ -87,18 +98,18 @@ func TestTagCreate(t *testing.T) { tagRW.AssertExpectations(t) r = newResolver() - tagRW = r.txnManager.(*mocks.TransactionManager).Tag().(*mocks.TagReaderWriter) + tagRW = r.repository.Tag.(*mocks.TagReaderWriter) - tagRW.On("Query", tagFilterForName(tagName), findFilter).Return(nil, 0, nil).Once() - tagRW.On("Query", tagFilterForAlias(tagName), findFilter).Return(nil, 0, nil).Once() + tagRW.On("Query", testCtx, tagFilterForName(tagName), findFilter).Return(nil, 0, nil).Once() + tagRW.On("Query", testCtx, tagFilterForAlias(tagName), findFilter).Return(nil, 0, nil).Once() newTag := &models.Tag{ ID: newTagID, Name: tagName, } - tagRW.On("Create", mock.AnythingOfType("models.Tag")).Return(newTag, nil) - tagRW.On("Find", newTagID).Return(newTag, nil) + tagRW.On("Create", testCtx, mock.AnythingOfType("models.Tag")).Return(newTag, nil) + tagRW.On("Find", testCtx, newTagID).Return(newTag, nil) - tag, err := r.Mutation().TagCreate(context.TODO(), models.TagCreateInput{ + tag, err := r.Mutation().TagCreate(testCtx, TagCreateInput{ Name: tagName, }) diff --git a/internal/api/resolver_query_configuration.go b/internal/api/resolver_query_configuration.go index d0852ff13..f3469de97 100644 --- a/internal/api/resolver_query_configuration.go +++ 
b/internal/api/resolver_query_configuration.go @@ -13,13 +13,13 @@ import ( "golang.org/x/text/collate" ) -func (r *queryResolver) Configuration(ctx context.Context) (*models.ConfigResult, error) { +func (r *queryResolver) Configuration(ctx context.Context) (*ConfigResult, error) { return makeConfigResult(), nil } -func (r *queryResolver) Directory(ctx context.Context, path, locale *string) (*models.Directory, error) { +func (r *queryResolver) Directory(ctx context.Context, path, locale *string) (*Directory, error) { - directory := &models.Directory{} + directory := &Directory{} var err error col := newCollator(locale, collate.IgnoreCase, collate.Numeric) @@ -59,8 +59,8 @@ func getParent(path string) *string { } } -func makeConfigResult() *models.ConfigResult { - return &models.ConfigResult{ +func makeConfigResult() *ConfigResult { + return &ConfigResult{ General: makeConfigGeneralResult(), Interface: makeConfigInterfaceResult(), Dlna: makeConfigDLNAResult(), @@ -70,7 +70,7 @@ func makeConfigResult() *models.ConfigResult { } } -func makeConfigGeneralResult() *models.ConfigGeneralResult { +func makeConfigGeneralResult() *ConfigGeneralResult { config := config.GetInstance() logFile := config.GetLogFile() @@ -82,7 +82,7 @@ func makeConfigGeneralResult() *models.ConfigGeneralResult { scraperUserAgent := config.GetScraperUserAgent() scraperCDPPath := config.GetScraperCDPPath() - return &models.ConfigGeneralResult{ + return &ConfigGeneralResult{ Stashes: config.GetStashPaths(), DatabasePath: config.GetDatabasePath(), GeneratedPath: config.GetGeneratedPath(), @@ -125,7 +125,7 @@ func makeConfigGeneralResult() *models.ConfigGeneralResult { } } -func makeConfigInterfaceResult() *models.ConfigInterfaceResult { +func makeConfigInterfaceResult() *ConfigInterfaceResult { config := config.GetInstance() menuItems := config.GetMenuItems() soundOnPreview := config.GetSoundOnPreview() @@ -149,7 +149,7 @@ func makeConfigInterfaceResult() *models.ConfigInterfaceResult { // FIXME - misnamed output field means we have redundant fields disableDropdownCreate := config.GetDisableDropdownCreate() - return &models.ConfigInterfaceResult{ + return &ConfigInterfaceResult{ MenuItems: menuItems, SoundOnPreview: &soundOnPreview, WallShowTitle: &wallShowTitle, @@ -177,10 +177,10 @@ func makeConfigInterfaceResult() *models.ConfigInterfaceResult { } } -func makeConfigDLNAResult() *models.ConfigDLNAResult { +func makeConfigDLNAResult() *ConfigDLNAResult { config := config.GetInstance() - return &models.ConfigDLNAResult{ + return &ConfigDLNAResult{ ServerName: config.GetDLNAServerName(), Enabled: config.GetDLNADefaultEnabled(), WhitelistedIPs: config.GetDLNADefaultIPWhitelist(), @@ -188,13 +188,13 @@ func makeConfigDLNAResult() *models.ConfigDLNAResult { } } -func makeConfigScrapingResult() *models.ConfigScrapingResult { +func makeConfigScrapingResult() *ConfigScrapingResult { config := config.GetInstance() scraperUserAgent := config.GetScraperUserAgent() scraperCDPPath := config.GetScraperCDPPath() - return &models.ConfigScrapingResult{ + return &ConfigScrapingResult{ ScraperUserAgent: &scraperUserAgent, ScraperCertCheck: config.GetScraperCertCheck(), ScraperCDPPath: &scraperCDPPath, @@ -202,12 +202,12 @@ func makeConfigScrapingResult() *models.ConfigScrapingResult { } } -func makeConfigDefaultsResult() *models.ConfigDefaultSettingsResult { +func makeConfigDefaultsResult() *ConfigDefaultSettingsResult { config := config.GetInstance() deleteFileDefault := config.GetDeleteFileDefault() deleteGeneratedDefault := 
config.GetDeleteGeneratedDefault() - return &models.ConfigDefaultSettingsResult{ + return &ConfigDefaultSettingsResult{ Identify: config.GetDefaultIdentifySettings(), Scan: config.GetDefaultScanSettings(), AutoTag: config.GetDefaultAutoTagSettings(), @@ -221,8 +221,8 @@ func makeConfigUIResult() map[string]interface{} { return config.GetInstance().GetUIConfiguration() } -func (r *queryResolver) ValidateStashBoxCredentials(ctx context.Context, input models.StashBoxInput) (*models.StashBoxValidationResult, error) { - client := stashbox.NewClient(models.StashBox{Endpoint: input.Endpoint, APIKey: input.APIKey}, r.txnManager) +func (r *queryResolver) ValidateStashBoxCredentials(ctx context.Context, input config.StashBoxInput) (*StashBoxValidationResult, error) { + client := stashbox.NewClient(models.StashBox{Endpoint: input.Endpoint, APIKey: input.APIKey}, r.txnManager, r.stashboxRepository()) user, err := client.GetUser(ctx) valid := user != nil && user.Me != nil @@ -248,7 +248,7 @@ func (r *queryResolver) ValidateStashBoxCredentials(ctx context.Context, input m } } - result := models.StashBoxValidationResult{ + result := StashBoxValidationResult{ Valid: valid, Status: status, } diff --git a/internal/api/resolver_query_dlna.go b/internal/api/resolver_query_dlna.go index 8d616f463..e620c526d 100644 --- a/internal/api/resolver_query_dlna.go +++ b/internal/api/resolver_query_dlna.go @@ -3,10 +3,10 @@ package api import ( "context" + "github.com/stashapp/stash/internal/dlna" "github.com/stashapp/stash/internal/manager" - "github.com/stashapp/stash/pkg/models" ) -func (r *queryResolver) DlnaStatus(ctx context.Context) (*models.DLNAStatus, error) { +func (r *queryResolver) DlnaStatus(ctx context.Context) (*dlna.Status, error) { return manager.GetInstance().DLNAService.Status(), nil } diff --git a/internal/api/resolver_query_find_gallery.go b/internal/api/resolver_query_find_gallery.go index c00332a4f..ee12471d1 100644 --- a/internal/api/resolver_query_find_gallery.go +++ b/internal/api/resolver_query_find_gallery.go @@ -13,8 +13,8 @@ func (r *queryResolver) FindGallery(ctx context.Context, id string) (ret *models return nil, err } - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Gallery().Find(idInt) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Gallery.Find(ctx, idInt) return err }); err != nil { return nil, err @@ -23,14 +23,14 @@ func (r *queryResolver) FindGallery(ctx context.Context, id string) (ret *models return ret, nil } -func (r *queryResolver) FindGalleries(ctx context.Context, galleryFilter *models.GalleryFilterType, filter *models.FindFilterType) (ret *models.FindGalleriesResultType, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - galleries, total, err := repo.Gallery().Query(galleryFilter, filter) +func (r *queryResolver) FindGalleries(ctx context.Context, galleryFilter *models.GalleryFilterType, filter *models.FindFilterType) (ret *FindGalleriesResultType, err error) { + if err := r.withTxn(ctx, func(ctx context.Context) error { + galleries, total, err := r.repository.Gallery.Query(ctx, galleryFilter, filter) if err != nil { return err } - ret = &models.FindGalleriesResultType{ + ret = &FindGalleriesResultType{ Count: total, Galleries: galleries, } diff --git a/internal/api/resolver_query_find_image.go b/internal/api/resolver_query_find_image.go index d26a8b081..ad9bf6c94 100644 --- a/internal/api/resolver_query_find_image.go +++ 
b/internal/api/resolver_query_find_image.go @@ -12,8 +12,8 @@ import ( func (r *queryResolver) FindImage(ctx context.Context, id *string, checksum *string) (*models.Image, error) { var image *models.Image - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - qb := repo.Image() + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Image var err error if id != nil { @@ -22,12 +22,20 @@ func (r *queryResolver) FindImage(ctx context.Context, id *string, checksum *str return err } - image, err = qb.Find(idInt) + image, err = qb.Find(ctx, idInt) if err != nil { return err } } else if checksum != nil { - image, err = qb.FindByChecksum(*checksum) + var images []*models.Image + images, err = qb.FindByChecksum(ctx, *checksum) + if err != nil { + return err + } + + if len(images) > 0 { + image = images[0] + } } return err @@ -38,13 +46,13 @@ func (r *queryResolver) FindImage(ctx context.Context, id *string, checksum *str return image, nil } -func (r *queryResolver) FindImages(ctx context.Context, imageFilter *models.ImageFilterType, imageIds []int, filter *models.FindFilterType) (ret *models.FindImagesResultType, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - qb := repo.Image() +func (r *queryResolver) FindImages(ctx context.Context, imageFilter *models.ImageFilterType, imageIds []int, filter *models.FindFilterType) (ret *FindImagesResultType, err error) { + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Image fields := graphql.CollectAllFields(ctx) - result, err := qb.Query(models.ImageQueryOptions{ + result, err := qb.Query(ctx, models.ImageQueryOptions{ QueryOptions: models.QueryOptions{ FindFilter: filter, Count: stringslice.StrInclude(fields, "count"), @@ -57,12 +65,12 @@ func (r *queryResolver) FindImages(ctx context.Context, imageFilter *models.Imag return err } - images, err := result.Resolve() + images, err := result.Resolve(ctx) if err != nil { return err } - ret = &models.FindImagesResultType{ + ret = &FindImagesResultType{ Count: result.Count, Images: images, Megapixels: result.Megapixels, diff --git a/internal/api/resolver_query_find_movie.go b/internal/api/resolver_query_find_movie.go index 1a66c2461..7505c7f36 100644 --- a/internal/api/resolver_query_find_movie.go +++ b/internal/api/resolver_query_find_movie.go @@ -13,8 +13,8 @@ func (r *queryResolver) FindMovie(ctx context.Context, id string) (ret *models.M return nil, err } - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Movie().Find(idInt) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Movie.Find(ctx, idInt) return err }); err != nil { return nil, err @@ -23,14 +23,14 @@ func (r *queryResolver) FindMovie(ctx context.Context, id string) (ret *models.M return ret, nil } -func (r *queryResolver) FindMovies(ctx context.Context, movieFilter *models.MovieFilterType, filter *models.FindFilterType) (ret *models.FindMoviesResultType, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - movies, total, err := repo.Movie().Query(movieFilter, filter) +func (r *queryResolver) FindMovies(ctx context.Context, movieFilter *models.MovieFilterType, filter *models.FindFilterType) (ret *FindMoviesResultType, err error) { + if err := r.withTxn(ctx, func(ctx context.Context) error { + movies, total, err := r.repository.Movie.Query(ctx, movieFilter, filter) if err != nil { return err } - ret = 
&models.FindMoviesResultType{ + ret = &FindMoviesResultType{ Count: total, Movies: movies, } @@ -44,8 +44,8 @@ func (r *queryResolver) FindMovies(ctx context.Context, movieFilter *models.Movi } func (r *queryResolver) AllMovies(ctx context.Context) (ret []*models.Movie, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Movie().All() + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Movie.All(ctx) return err }); err != nil { return nil, err diff --git a/internal/api/resolver_query_find_performer.go b/internal/api/resolver_query_find_performer.go index 32cc46891..4314b0f69 100644 --- a/internal/api/resolver_query_find_performer.go +++ b/internal/api/resolver_query_find_performer.go @@ -13,8 +13,8 @@ func (r *queryResolver) FindPerformer(ctx context.Context, id string) (ret *mode return nil, err } - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Performer().Find(idInt) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Performer.Find(ctx, idInt) return err }); err != nil { return nil, err @@ -23,14 +23,14 @@ func (r *queryResolver) FindPerformer(ctx context.Context, id string) (ret *mode return ret, nil } -func (r *queryResolver) FindPerformers(ctx context.Context, performerFilter *models.PerformerFilterType, filter *models.FindFilterType) (ret *models.FindPerformersResultType, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - performers, total, err := repo.Performer().Query(performerFilter, filter) +func (r *queryResolver) FindPerformers(ctx context.Context, performerFilter *models.PerformerFilterType, filter *models.FindFilterType) (ret *FindPerformersResultType, err error) { + if err := r.withTxn(ctx, func(ctx context.Context) error { + performers, total, err := r.repository.Performer.Query(ctx, performerFilter, filter) if err != nil { return err } - ret = &models.FindPerformersResultType{ + ret = &FindPerformersResultType{ Count: total, Performers: performers, } @@ -43,8 +43,8 @@ func (r *queryResolver) FindPerformers(ctx context.Context, performerFilter *mod } func (r *queryResolver) AllPerformers(ctx context.Context) (ret []*models.Performer, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.Performer().All() + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Performer.All(ctx) return err }); err != nil { return nil, err diff --git a/internal/api/resolver_query_find_saved_filter.go b/internal/api/resolver_query_find_saved_filter.go index a28ef2f59..7b934f581 100644 --- a/internal/api/resolver_query_find_saved_filter.go +++ b/internal/api/resolver_query_find_saved_filter.go @@ -13,8 +13,8 @@ func (r *queryResolver) FindSavedFilter(ctx context.Context, id string) (ret *mo return nil, err } - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { - ret, err = repo.SavedFilter().Find(idInt) + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.SavedFilter.Find(ctx, idInt) return err }); err != nil { return nil, err @@ -23,11 +23,11 @@ func (r *queryResolver) FindSavedFilter(ctx context.Context, id string) (ret *mo } func (r *queryResolver) FindSavedFilters(ctx context.Context, mode *models.FilterMode) (ret []*models.SavedFilter, err error) { - if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error { + if err := r.withTxn(ctx, 
diff --git a/internal/api/resolver_query_find_saved_filter.go b/internal/api/resolver_query_find_saved_filter.go
index a28ef2f59..7b934f581 100644
--- a/internal/api/resolver_query_find_saved_filter.go
+++ b/internal/api/resolver_query_find_saved_filter.go
@@ -13,8 +13,8 @@ func (r *queryResolver) FindSavedFilter(ctx context.Context, id string) (ret *mo
 		return nil, err
 	}
 
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
-		ret, err = repo.SavedFilter().Find(idInt)
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		ret, err = r.repository.SavedFilter.Find(ctx, idInt)
 		return err
 	}); err != nil {
 		return nil, err
@@ -23,11 +23,11 @@ func (r *queryResolver) FindSavedFilter(ctx context.Context, id string) (ret *mo
 }
 
 func (r *queryResolver) FindSavedFilters(ctx context.Context, mode *models.FilterMode) (ret []*models.SavedFilter, err error) {
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
 		if mode != nil {
-			ret, err = repo.SavedFilter().FindByMode(*mode)
+			ret, err = r.repository.SavedFilter.FindByMode(ctx, *mode)
 		} else {
-			ret, err = repo.SavedFilter().All()
+			ret, err = r.repository.SavedFilter.All(ctx)
 		}
 		return err
 	}); err != nil {
@@ -37,8 +37,8 @@ func (r *queryResolver) FindSavedFilters(ctx context.Context, mode *models.Filte
 }
 
 func (r *queryResolver) FindDefaultFilter(ctx context.Context, mode models.FilterMode) (ret *models.SavedFilter, err error) {
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
-		ret, err = repo.SavedFilter().FindDefault(mode)
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		ret, err = r.repository.SavedFilter.FindDefault(ctx, mode)
 		return err
 	}); err != nil {
 		return nil, err
diff --git a/internal/api/resolver_query_find_scene.go b/internal/api/resolver_query_find_scene.go
index 180e25a32..9f049805f 100644
--- a/internal/api/resolver_query_find_scene.go
+++ b/internal/api/resolver_query_find_scene.go
@@ -12,20 +12,24 @@ import (
 func (r *queryResolver) FindScene(ctx context.Context, id *string, checksum *string) (*models.Scene, error) {
 	var scene *models.Scene
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
-		qb := repo.Scene()
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.Scene
 		var err error
 		if id != nil {
 			idInt, err := strconv.Atoi(*id)
 			if err != nil {
 				return err
 			}
-			scene, err = qb.Find(idInt)
+			scene, err = qb.Find(ctx, idInt)
 			if err != nil {
 				return err
 			}
 		} else if checksum != nil {
-			scene, err = qb.FindByChecksum(*checksum)
+			var scenes []*models.Scene
+			scenes, err = qb.FindByChecksum(ctx, *checksum)
+			if len(scenes) > 0 {
+				scene = scenes[0]
+			}
 		}
 
 		return err
@@ -36,24 +40,29 @@ func (r *queryResolver) FindScene(ctx context.Context, id *string, checksum *str
 	return scene, nil
 }
 
-func (r *queryResolver) FindSceneByHash(ctx context.Context, input models.SceneHashInput) (*models.Scene, error) {
+func (r *queryResolver) FindSceneByHash(ctx context.Context, input SceneHashInput) (*models.Scene, error) {
 	var scene *models.Scene
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
-		qb := repo.Scene()
-		var err error
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		qb := r.repository.Scene
 
 		if input.Checksum != nil {
-			scene, err = qb.FindByChecksum(*input.Checksum)
+			scenes, err := qb.FindByChecksum(ctx, *input.Checksum)
 			if err != nil {
 				return err
 			}
+			if len(scenes) > 0 {
+				scene = scenes[0]
+			}
 		}
 
 		if scene == nil && input.Oshash != nil {
-			scene, err = qb.FindByOSHash(*input.Oshash)
+			scenes, err := qb.FindByOSHash(ctx, *input.Oshash)
 			if err != nil {
 				return err
 			}
+			if len(scenes) > 0 {
+				scene = scenes[0]
+			}
 		}
 
 		return nil
@@ -64,8 +73,8 @@ func (r *queryResolver) FindSceneByHash(ctx context.Context, input models.SceneH
 	return scene, nil
 }
 
-func (r *queryResolver) FindScenes(ctx context.Context, sceneFilter *models.SceneFilterType, sceneIDs []int, filter *models.FindFilterType) (ret *models.FindScenesResultType, err error) {
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
+func (r *queryResolver) FindScenes(ctx context.Context, sceneFilter *models.SceneFilterType, sceneIDs []int, filter *models.FindFilterType) (ret *FindScenesResultType, err error) {
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
 		var scenes []*models.Scene
 		var err error
 
@@ -73,17 +82,26 @@ func (r *queryResolver) FindScenes(ctx context.Context, sceneFilter *models.Scen
 		result := &models.SceneQueryResult{}
 		if len(sceneIDs) > 0 {
-			scenes, err = repo.Scene().FindMany(sceneIDs)
+			scenes, err = r.repository.Scene.FindMany(ctx, sceneIDs)
 			if err == nil {
 				result.Count = len(scenes)
 				for _, s := range scenes {
-					result.TotalDuration += s.Duration.Float64
-					size, _ := strconv.ParseFloat(s.Size.String, 64)
-					result.TotalSize += size
+					if err = s.LoadPrimaryFile(ctx, r.repository.File); err != nil {
+						break
+					}
+
+					f := s.Files.Primary()
+					if f == nil {
+						continue
+					}
+
+					result.TotalDuration += f.Duration
+
+					result.TotalSize += float64(f.Size)
 				}
 			}
 		} else {
-			result, err = repo.Scene().Query(models.SceneQueryOptions{
+			result, err = r.repository.Scene.Query(ctx, models.SceneQueryOptions{
 				QueryOptions: models.QueryOptions{
 					FindFilter: filter,
 					Count:      stringslice.StrInclude(fields, "count"),
@@ -93,7 +111,7 @@ func (r *queryResolver) FindScenes(ctx context.Context, sceneFilter *models.Scen
 				TotalSize:  stringslice.StrInclude(fields, "filesize"),
 			})
 			if err == nil {
-				scenes, err = result.Resolve()
+				scenes, err = result.Resolve(ctx)
 			}
 		}
 
@@ -101,7 +119,7 @@ func (r *queryResolver) FindScenes(ctx context.Context, sceneFilter *models.Scen
 			return err
 		}
 
-		ret = &models.FindScenesResultType{
+		ret = &FindScenesResultType{
 			Count:    result.Count,
 			Scenes:   scenes,
 			Duration: result.TotalDuration,
@@ -116,8 +134,8 @@ func (r *queryResolver) FindScenes(ctx context.Context, sceneFilter *models.Scen
 	return ret, nil
 }
 
-func (r *queryResolver) FindScenesByPathRegex(ctx context.Context, filter *models.FindFilterType) (ret *models.FindScenesResultType, err error) {
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
+func (r *queryResolver) FindScenesByPathRegex(ctx context.Context, filter *models.FindFilterType) (ret *FindScenesResultType, err error) {
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
 
 		sceneFilter := &models.SceneFilterType{}
 
@@ -138,7 +156,7 @@ func (r *queryResolver) FindScenesByPathRegex(ctx context.Context, filter *model
 
 		fields := graphql.CollectAllFields(ctx)
 
-		result, err := repo.Scene().Query(models.SceneQueryOptions{
+		result, err := r.repository.Scene.Query(ctx, models.SceneQueryOptions{
 			QueryOptions: models.QueryOptions{
 				FindFilter: queryFilter,
 				Count:      stringslice.StrInclude(fields, "count"),
@@ -151,12 +169,12 @@ func (r *queryResolver) FindScenesByPathRegex(ctx context.Context, filter *model
 			return err
 		}
 
-		scenes, err := result.Resolve()
+		scenes, err := result.Resolve(ctx)
 		if err != nil {
 			return err
 		}
 
-		ret = &models.FindScenesResultType{
+		ret = &FindScenesResultType{
 			Count:    result.Count,
 			Scenes:   scenes,
 			Duration: result.TotalDuration,
@@ -171,17 +189,23 @@ func (r *queryResolver) FindScenesByPathRegex(ctx context.Context, filter *model
 	return ret, nil
 }
 
-func (r *queryResolver) ParseSceneFilenames(ctx context.Context, filter *models.FindFilterType, config models.SceneParserInput) (ret *models.SceneParserResultType, err error) {
+func (r *queryResolver) ParseSceneFilenames(ctx context.Context, filter *models.FindFilterType, config manager.SceneParserInput) (ret *SceneParserResultType, err error) {
 	parser := manager.NewSceneFilenameParser(filter, config)
 
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
-		result, count, err := parser.Parse(repo)
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		result, count, err := parser.Parse(ctx, manager.SceneFilenameParserRepository{
+			Scene:     r.repository.Scene,
+			Performer: r.repository.Performer,
+			Studio:    r.repository.Studio,
+			Movie:     r.repository.Movie,
+			Tag:       r.repository.Tag,
+		})
 		if err != nil {
 			return err
 		}
 
-		ret = &models.SceneParserResultType{
+		ret = &SceneParserResultType{
 			Count:   count,
 			Results: result,
 		}
@@ -199,8 +223,8 @@ func (r *queryResolver) FindDuplicateScenes(ctx context.Context, distance *int)
 	if distance != nil {
 		dist = *distance
 	}
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
-		ret, err = repo.Scene().FindDuplicates(dist)
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		ret, err = r.repository.Scene.FindDuplicates(ctx, dist)
 		return err
 	}); err != nil {
 		return nil, err
diff --git a/internal/api/resolver_query_find_scene_marker.go b/internal/api/resolver_query_find_scene_marker.go
index a3d6b6058..03b9e261a 100644
--- a/internal/api/resolver_query_find_scene_marker.go
+++ b/internal/api/resolver_query_find_scene_marker.go
@@ -6,13 +6,13 @@ import (
 	"github.com/stashapp/stash/pkg/models"
 )
 
-func (r *queryResolver) FindSceneMarkers(ctx context.Context, sceneMarkerFilter *models.SceneMarkerFilterType, filter *models.FindFilterType) (ret *models.FindSceneMarkersResultType, err error) {
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
-		sceneMarkers, total, err := repo.SceneMarker().Query(sceneMarkerFilter, filter)
+func (r *queryResolver) FindSceneMarkers(ctx context.Context, sceneMarkerFilter *models.SceneMarkerFilterType, filter *models.FindFilterType) (ret *FindSceneMarkersResultType, err error) {
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		sceneMarkers, total, err := r.repository.SceneMarker.Query(ctx, sceneMarkerFilter, filter)
 		if err != nil {
 			return err
 		}
 
-		ret = &models.FindSceneMarkersResultType{
+		ret = &FindSceneMarkersResultType{
 			Count:        total,
 			SceneMarkers: sceneMarkers,
 		}
diff --git a/internal/api/resolver_query_find_studio.go b/internal/api/resolver_query_find_studio.go
index 71677cb35..0bd17b9ad 100644
--- a/internal/api/resolver_query_find_studio.go
+++ b/internal/api/resolver_query_find_studio.go
@@ -13,9 +13,9 @@ func (r *queryResolver) FindStudio(ctx context.Context, id string) (ret *models.
 		return nil, err
 	}
 
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
 		var err error
-		ret, err = repo.Studio().Find(idInt)
+		ret, err = r.repository.Studio.Find(ctx, idInt)
 		return err
 	}); err != nil {
 		return nil, err
@@ -24,14 +24,14 @@ func (r *queryResolver) FindStudio(ctx context.Context, id string) (ret *models.
 	return ret, nil
 }
 
-func (r *queryResolver) FindStudios(ctx context.Context, studioFilter *models.StudioFilterType, filter *models.FindFilterType) (ret *models.FindStudiosResultType, err error) {
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
-		studios, total, err := repo.Studio().Query(studioFilter, filter)
+func (r *queryResolver) FindStudios(ctx context.Context, studioFilter *models.StudioFilterType, filter *models.FindFilterType) (ret *FindStudiosResultType, err error) {
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		studios, total, err := r.repository.Studio.Query(ctx, studioFilter, filter)
 		if err != nil {
 			return err
 		}
 
-		ret = &models.FindStudiosResultType{
+		ret = &FindStudiosResultType{
 			Count:   total,
 			Studios: studios,
 		}
@@ -45,8 +45,8 @@ func (r *queryResolver) FindStudios(ctx context.Context, studioFilter *models.St
 }
 
 func (r *queryResolver) AllStudios(ctx context.Context) (ret []*models.Studio, err error) {
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
-		ret, err = repo.Studio().All()
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		ret, err = r.repository.Studio.All(ctx)
 		return err
 	}); err != nil {
 		return nil, err
diff --git a/internal/api/resolver_query_find_tag.go b/internal/api/resolver_query_find_tag.go
index e44366361..77bd57f98 100644
--- a/internal/api/resolver_query_find_tag.go
+++ b/internal/api/resolver_query_find_tag.go
@@ -13,8 +13,8 @@ func (r *queryResolver) FindTag(ctx context.Context, id string) (ret *models.Tag
 		return nil, err
 	}
 
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
-		ret, err = repo.Tag().Find(idInt)
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		ret, err = r.repository.Tag.Find(ctx, idInt)
 		return err
 	}); err != nil {
 		return nil, err
@@ -23,14 +23,14 @@ func (r *queryResolver) FindTag(ctx context.Context, id string) (ret *models.Tag
 	return ret, nil
 }
 
-func (r *queryResolver) FindTags(ctx context.Context, tagFilter *models.TagFilterType, filter *models.FindFilterType) (ret *models.FindTagsResultType, err error) {
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
-		tags, total, err := repo.Tag().Query(tagFilter, filter)
+func (r *queryResolver) FindTags(ctx context.Context, tagFilter *models.TagFilterType, filter *models.FindFilterType) (ret *FindTagsResultType, err error) {
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		tags, total, err := r.repository.Tag.Query(ctx, tagFilter, filter)
 		if err != nil {
 			return err
 		}
 
-		ret = &models.FindTagsResultType{
+		ret = &FindTagsResultType{
 			Count: total,
 			Tags:  tags,
 		}
@@ -44,8 +44,8 @@ func (r *queryResolver) FindTags(ctx context.Context, tagFilter *models.TagFilte
 }
 
 func (r *queryResolver) AllTags(ctx context.Context) (ret []*models.Tag, err error) {
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
-		ret, err = repo.Tag().All()
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
+		ret, err = r.repository.Tag.All(ctx)
 		return err
 	}); err != nil {
 		return nil, err
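One more pattern recurs across FindImage, FindScene and FindSceneByHash above (and the route middlewares later in this diff): checksum and oshash lookups now return a slice, since several files can share a hash, and the caller keeps the first match. The inlined len/index logic could be captured by a tiny generic helper; firstOrNil is a hypothetical name, not something this change introduces:

    // firstOrNil returns the first element of a slice of pointers, or nil
    // when the slice is empty. Requires Go 1.18+ generics.
    func firstOrNil[T any](items []*T) *T {
    	if len(items) == 0 {
    		return nil
    	}
    	return items[0]
    }

    // usage, matching the shape of the resolver code above:
    //   scenes, err := qb.FindByChecksum(ctx, *checksum)
    //   scene = firstOrNil(scenes)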
diff --git a/internal/api/resolver_query_job.go b/internal/api/resolver_query_job.go
index 06f090190..aaa671013 100644
--- a/internal/api/resolver_query_job.go
+++ b/internal/api/resolver_query_job.go
@@ -6,13 +6,12 @@ import (
 
 	"github.com/stashapp/stash/internal/manager"
 	"github.com/stashapp/stash/pkg/job"
-	"github.com/stashapp/stash/pkg/models"
 )
 
-func (r *queryResolver) JobQueue(ctx context.Context) ([]*models.Job, error) {
+func (r *queryResolver) JobQueue(ctx context.Context) ([]*Job, error) {
 	queue := manager.GetInstance().JobManager.GetQueue()
 
-	var ret []*models.Job
+	var ret []*Job
 	for _, j := range queue {
 		ret = append(ret, jobToJobModel(j))
 	}
@@ -20,7 +19,7 @@ func (r *queryResolver) JobQueue(ctx context.Context) ([]*models.Job, error) {
 	return ret, nil
 }
 
-func (r *queryResolver) FindJob(ctx context.Context, input models.FindJobInput) (*models.Job, error) {
+func (r *queryResolver) FindJob(ctx context.Context, input FindJobInput) (*Job, error) {
 	jobID, err := strconv.Atoi(input.ID)
 	if err != nil {
 		return nil, err
@@ -33,10 +32,10 @@ func (r *queryResolver) FindJob(ctx context.Context, input models.FindJobInput)
 	return jobToJobModel(*j), nil
 }
 
-func jobToJobModel(j job.Job) *models.Job {
-	ret := &models.Job{
+func jobToJobModel(j job.Job) *Job {
+	ret := &Job{
 		ID:          strconv.Itoa(j.ID),
-		Status:      models.JobStatus(j.Status),
+		Status:      JobStatus(j.Status),
 		Description: j.Description,
 		SubTasks:    j.Details,
 		StartTime:   j.StartTime,
diff --git a/internal/api/resolver_query_logs.go b/internal/api/resolver_query_logs.go
index c6ca6d9fd..e0cee9a84 100644
--- a/internal/api/resolver_query_logs.go
+++ b/internal/api/resolver_query_logs.go
@@ -4,16 +4,15 @@ import (
 	"context"
 
 	"github.com/stashapp/stash/internal/manager"
-	"github.com/stashapp/stash/pkg/models"
 )
 
-func (r *queryResolver) Logs(ctx context.Context) ([]*models.LogEntry, error) {
+func (r *queryResolver) Logs(ctx context.Context) ([]*LogEntry, error) {
 	logger := manager.GetInstance().Logger
 	logCache := logger.GetLogCache()
-	ret := make([]*models.LogEntry, len(logCache))
+	ret := make([]*LogEntry, len(logCache))
 
 	for i, entry := range logCache {
-		ret[i] = &models.LogEntry{
+		ret[i] = &LogEntry{
 			Time:    entry.Time,
 			Level:   getLogLevel(entry.Type),
 			Message: entry.Message,
diff --git a/internal/api/resolver_query_metadata.go b/internal/api/resolver_query_metadata.go
index d96beb407..b9189b102 100644
--- a/internal/api/resolver_query_metadata.go
+++ b/internal/api/resolver_query_metadata.go
@@ -4,9 +4,8 @@ import (
 	"context"
 
 	"github.com/stashapp/stash/internal/manager"
-	"github.com/stashapp/stash/pkg/models"
 )
 
-func (r *queryResolver) SystemStatus(ctx context.Context) (*models.SystemStatus, error) {
+func (r *queryResolver) SystemStatus(ctx context.Context) (*manager.SystemStatus, error) {
 	return manager.GetInstance().GetSystemStatus(), nil
 }
diff --git a/internal/api/resolver_query_plugin.go b/internal/api/resolver_query_plugin.go
index 87c93dfcc..61463c5df 100644
--- a/internal/api/resolver_query_plugin.go
+++ b/internal/api/resolver_query_plugin.go
@@ -4,13 +4,13 @@ import (
 	"context"
 
 	"github.com/stashapp/stash/internal/manager"
-	"github.com/stashapp/stash/pkg/models"
+	"github.com/stashapp/stash/pkg/plugin"
 )
 
-func (r *queryResolver) Plugins(ctx context.Context) ([]*models.Plugin, error) {
+func (r *queryResolver) Plugins(ctx context.Context) ([]*plugin.Plugin, error) {
 	return manager.GetInstance().PluginCache.ListPlugins(), nil
 }
 
-func (r *queryResolver) PluginTasks(ctx context.Context) ([]*models.PluginTask, error) {
+func (r *queryResolver) PluginTasks(ctx context.Context) ([]*plugin.PluginTask, error) {
 	return manager.GetInstance().PluginCache.ListPluginTasks(), nil
 }
diff --git a/internal/api/resolver_query_scene.go b/internal/api/resolver_query_scene.go
index 3d593d911..b6da7b901 100644
--- a/internal/api/resolver_query_scene.go
+++ b/internal/api/resolver_query_scene.go
@@ -11,13 +11,18 @@ import (
 	"github.com/stashapp/stash/pkg/models"
 )
 
-func (r *queryResolver) SceneStreams(ctx context.Context, id *string) ([]*models.SceneStreamEndpoint, error) {
+func (r *queryResolver) SceneStreams(ctx context.Context, id *string) ([]*manager.SceneStreamEndpoint, error) {
 	// find the scene
 	var scene *models.Scene
-	if err := r.withReadTxn(ctx, func(repo models.ReaderRepository) error {
+	if err := r.withTxn(ctx, func(ctx context.Context) error {
 		idInt, _ := strconv.Atoi(*id)
 		var err error
-		scene, err = repo.Scene().Find(idInt)
+		scene, err = r.repository.Scene.Find(ctx, idInt)
+
+		if scene != nil {
+			err = scene.LoadPrimaryFile(ctx, r.repository.File)
+		}
+
 		return err
 	}); err != nil {
 		return nil, err
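SceneStreams is one of several call sites that must now load a scene's primary file before reading path, duration or codec data, because file metadata no longer lives on the scene row itself. A rough sketch of the lazy-load shape this implies — every type below is an illustrative stand-in inferred from the calls visible in this diff, not the real models package:

    package modelsketch

    import "context"

    // VideoFile and RelatedFiles are assumed shapes, not stash's definitions.
    type VideoFile struct {
    	Path     string
    	Duration float64
    	Size     int64
    }

    type RelatedFiles struct {
    	primary *VideoFile
    	loaded  bool
    }

    // Primary returns the primary file, or nil if none is attached or loaded.
    func (r *RelatedFiles) Primary() *VideoFile { return r.primary }

    type Scene struct {
    	ID    int
    	Files RelatedFiles
    }

    // FileLoader is an assumed lookup interface; the diff passes
    // r.repository.File into LoadPrimaryFile at the call sites.
    type FileLoader interface {
    	FindBySceneID(ctx context.Context, sceneID int) ([]*VideoFile, error)
    }

    // LoadPrimaryFile populates Files once, so later Primary() calls are cheap
    // and nil-checks (as in StreamMKV or FindScenes) stay meaningful.
    func (s *Scene) LoadPrimaryFile(ctx context.Context, l FileLoader) error {
    	if s.Files.loaded {
    		return nil
    	}
    	files, err := l.FindBySceneID(ctx, s.ID)
    	if err != nil {
    		return err
    	}
    	if len(files) > 0 {
    		s.Files.primary = files[0] // first file treated as primary
    	}
    	s.Files.loaded = true
    	return nil
    }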
diff --git a/internal/api/resolver_query_scraper.go b/internal/api/resolver_query_scraper.go
index 8fd6c345a..85f47ee2c 100644
--- a/internal/api/resolver_query_scraper.go
+++ b/internal/api/resolver_query_scraper.go
@@ -17,13 +17,13 @@ import (
 	"github.com/stashapp/stash/pkg/sliceutil/stringslice"
 )
 
-func (r *queryResolver) ScrapeURL(ctx context.Context, url string, ty models.ScrapeContentType) (models.ScrapedContent, error) {
+func (r *queryResolver) ScrapeURL(ctx context.Context, url string, ty scraper.ScrapeContentType) (scraper.ScrapedContent, error) {
 	return r.scraperCache().ScrapeURL(ctx, url, ty)
 }
 
 // deprecated
 func (r *queryResolver) ScrapeFreeonesPerformerList(ctx context.Context, query string) ([]string, error) {
-	content, err := r.scraperCache().ScrapeName(ctx, scraper.FreeonesScraperID, query, models.ScrapeContentTypePerformer)
+	content, err := r.scraperCache().ScrapeName(ctx, scraper.FreeonesScraperID, query, scraper.ScrapeContentTypePerformer)
 
 	if err != nil {
 		return nil, err
@@ -44,24 +44,24 @@ func (r *queryResolver) ScrapeFreeonesPerformerList(ctx context.Context, query s
 	return ret, nil
 }
 
-func (r *queryResolver) ListScrapers(ctx context.Context, types []models.ScrapeContentType) ([]*models.Scraper, error) {
+func (r *queryResolver) ListScrapers(ctx context.Context, types []scraper.ScrapeContentType) ([]*scraper.Scraper, error) {
 	return r.scraperCache().ListScrapers(types), nil
 }
 
-func (r *queryResolver) ListPerformerScrapers(ctx context.Context) ([]*models.Scraper, error) {
-	return r.scraperCache().ListScrapers([]models.ScrapeContentType{models.ScrapeContentTypePerformer}), nil
+func (r *queryResolver) ListPerformerScrapers(ctx context.Context) ([]*scraper.Scraper, error) {
+	return r.scraperCache().ListScrapers([]scraper.ScrapeContentType{scraper.ScrapeContentTypePerformer}), nil
 }
 
-func (r *queryResolver) ListSceneScrapers(ctx context.Context) ([]*models.Scraper, error) {
-	return r.scraperCache().ListScrapers([]models.ScrapeContentType{models.ScrapeContentTypeScene}), nil
+func (r *queryResolver) ListSceneScrapers(ctx context.Context) ([]*scraper.Scraper, error) {
+	return r.scraperCache().ListScrapers([]scraper.ScrapeContentType{scraper.ScrapeContentTypeScene}), nil
 }
 
-func (r *queryResolver) ListGalleryScrapers(ctx context.Context) ([]*models.Scraper, error) {
-	return r.scraperCache().ListScrapers([]models.ScrapeContentType{models.ScrapeContentTypeGallery}), nil
+func (r *queryResolver) ListGalleryScrapers(ctx context.Context) ([]*scraper.Scraper, error) {
+	return r.scraperCache().ListScrapers([]scraper.ScrapeContentType{scraper.ScrapeContentTypeGallery}), nil
 }
 
-func (r *queryResolver) ListMovieScrapers(ctx context.Context) ([]*models.Scraper, error) {
-	return r.scraperCache().ListScrapers([]models.ScrapeContentType{models.ScrapeContentTypeMovie}), nil
+func (r *queryResolver) ListMovieScrapers(ctx context.Context) ([]*scraper.Scraper, error) {
+	return r.scraperCache().ListScrapers([]scraper.ScrapeContentType{scraper.ScrapeContentTypeMovie}), nil
 }
 
 func (r *queryResolver) ScrapePerformerList(ctx context.Context, scraperID string, query string) ([]*models.ScrapedPerformer, error) {
@@ -69,7 +69,7 @@ func (r *queryResolver) ScrapePerformerList(ctx context.Context, scraperID strin
 		return nil, nil
 	}
 
-	content, err := r.scraperCache().ScrapeName(ctx, scraperID, query, models.ScrapeContentTypePerformer)
+	content, err := r.scraperCache().ScrapeName(ctx, scraperID, query, scraper.ScrapeContentTypePerformer)
 	if err != nil {
 		return nil, err
 	}
@@ -77,7 +77,7 @@ func (r *queryResolver) ScrapePerformerList(ctx context.Context, scraperID strin
 	return marshalScrapedPerformers(content)
 }
 
-func (r *queryResolver) ScrapePerformer(ctx context.Context, scraperID string, scrapedPerformer models.ScrapedPerformerInput) (*models.ScrapedPerformer, error) {
+func (r *queryResolver) ScrapePerformer(ctx context.Context, scraperID string, scrapedPerformer scraper.ScrapedPerformerInput) (*models.ScrapedPerformer, error) {
 	content, err := r.scraperCache().ScrapeFragment(ctx, scraperID, scraper.Input{Performer: &scrapedPerformer})
 	if err != nil {
 		return nil, err
@@ -86,7 +86,7 @@ func (r *queryResolver) ScrapePerformer(ctx context.Context, scraperID string, s
 }
 
 func (r *queryResolver) ScrapePerformerURL(ctx context.Context, url string) (*models.ScrapedPerformer, error) {
-	content, err := r.scraperCache().ScrapeURL(ctx, url, models.ScrapeContentTypePerformer)
+	content, err := r.scraperCache().ScrapeURL(ctx, url, scraper.ScrapeContentTypePerformer)
 	if err != nil {
 		return nil, err
 	}
@@ -94,12 +94,12 @@ func (r *queryResolver) ScrapePerformerURL(ctx context.Context, url string) (*mo
 	return marshalScrapedPerformer(content)
 }
 
-func (r *queryResolver) ScrapeSceneQuery(ctx context.Context, scraperID string, query string) ([]*models.ScrapedScene, error) {
+func (r *queryResolver) ScrapeSceneQuery(ctx context.Context, scraperID string, query string) ([]*scraper.ScrapedScene, error) {
 	if query == "" {
 		return nil, nil
 	}
 
-	content, err := r.scraperCache().ScrapeName(ctx, scraperID, query, models.ScrapeContentTypeScene)
+	content, err := r.scraperCache().ScrapeName(ctx, scraperID, query, scraper.ScrapeContentTypeScene)
 	if err != nil {
 		return nil, err
 	}
@@ -113,13 +113,13 @@ func (r *queryResolver) ScrapeSceneQuery(ctx context.Context, scraperID string,
 	return ret, nil
 }
 
-func (r *queryResolver) ScrapeScene(ctx context.Context, scraperID string, scene models.SceneUpdateInput) (*models.ScrapedScene, error) {
+func (r *queryResolver) ScrapeScene(ctx context.Context, scraperID string, scene models.SceneUpdateInput) (*scraper.ScrapedScene, error) {
 	id, err := strconv.Atoi(scene.ID)
 	if err != nil {
 		return nil, fmt.Errorf("%w: scene.ID is not an integer: '%s'", ErrInput, scene.ID)
 	}
 
-	content, err := r.scraperCache().ScrapeID(ctx, scraperID, id, models.ScrapeContentTypeScene)
+	content, err := r.scraperCache().ScrapeID(ctx, scraperID, id, scraper.ScrapeContentTypeScene)
 	if err != nil {
 		return nil, err
 	}
@@ -129,13 +129,13 @@ func (r *queryResolver) ScrapeScene(ctx context.Context, scraperID string, scene
 		return nil, err
 	}
 
-	filterSceneTags([]*models.ScrapedScene{ret})
+	filterSceneTags([]*scraper.ScrapedScene{ret})
 
 	return ret, nil
 }
 
 // filterSceneTags removes tags matching excluded tag patterns from the provided scraped scenes
-func filterSceneTags(scenes []*models.ScrapedScene) {
+func filterSceneTags(scenes []*scraper.ScrapedScene) {
 	excludePatterns := manager.GetInstance().Config.GetScraperExcludeTagPatterns()
 	var excludeRegexps []*regexp.Regexp
 
@@ -179,8 +179,8 @@ func filterSceneTags(scenes []*models.ScrapedScene) {
 	}
 }
 
-func (r *queryResolver) ScrapeSceneURL(ctx context.Context, url string) (*models.ScrapedScene, error) {
-	content, err := r.scraperCache().ScrapeURL(ctx, url, models.ScrapeContentTypeScene)
+func (r *queryResolver) ScrapeSceneURL(ctx context.Context, url string) (*scraper.ScrapedScene, error) {
+	content, err := r.scraperCache().ScrapeURL(ctx, url, scraper.ScrapeContentTypeScene)
 	if err != nil {
 		return nil, err
 	}
@@ -190,18 +190,18 @@ func (r *queryResolver) ScrapeSceneURL(ctx context.Context, url string) (*models
 		return nil, err
 	}
 
-	filterSceneTags([]*models.ScrapedScene{ret})
+	filterSceneTags([]*scraper.ScrapedScene{ret})
 
 	return ret, nil
 }
 
-func (r *queryResolver) ScrapeGallery(ctx context.Context, scraperID string, gallery models.GalleryUpdateInput) (*models.ScrapedGallery, error) {
+func (r *queryResolver) ScrapeGallery(ctx context.Context, scraperID string, gallery models.GalleryUpdateInput) (*scraper.ScrapedGallery, error) {
 	id, err := strconv.Atoi(gallery.ID)
 	if err != nil {
 		return nil, fmt.Errorf("%w: gallery id is not an integer: '%s'", ErrInput, gallery.ID)
 	}
 
-	content, err := r.scraperCache().ScrapeID(ctx, scraperID, id, models.ScrapeContentTypeGallery)
+	content, err := r.scraperCache().ScrapeID(ctx, scraperID, id, scraper.ScrapeContentTypeGallery)
 	if err != nil {
 		return nil, err
 	}
@@ -209,8 +209,8 @@ func (r *queryResolver) ScrapeGallery(ctx context.Context, scraperID string, gal
 	return marshalScrapedGallery(content)
 }
 
-func (r *queryResolver) ScrapeGalleryURL(ctx context.Context, url string) (*models.ScrapedGallery, error) {
-	content, err := r.scraperCache().ScrapeURL(ctx, url, models.ScrapeContentTypeGallery)
+func (r *queryResolver) ScrapeGalleryURL(ctx context.Context, url string) (*scraper.ScrapedGallery, error) {
+	content, err := r.scraperCache().ScrapeURL(ctx, url, scraper.ScrapeContentTypeGallery)
 	if err != nil {
 		return nil, err
 	}
@@ -219,7 +219,7 @@ func (r *queryResolver) ScrapeGalleryURL(ctx context.Context, url string) (*mode
 }
 
 func (r *queryResolver) ScrapeMovieURL(ctx context.Context, url string) (*models.ScrapedMovie, error) {
-	content, err := r.scraperCache().ScrapeURL(ctx, url, models.ScrapeContentTypeMovie)
+	content, err := r.scraperCache().ScrapeURL(ctx, url, scraper.ScrapeContentTypeMovie)
 	if err != nil {
 		return nil, err
 	}
@@ -234,11 +234,11 @@ func (r *queryResolver) getStashBoxClient(index int) (*stashbox.Client, error) {
 		return nil, fmt.Errorf("%w: invalid stash_box_index %d", ErrInput, index)
 	}
 
-	return stashbox.NewClient(*boxes[index], r.txnManager), nil
+	return stashbox.NewClient(*boxes[index], r.txnManager, r.stashboxRepository()), nil
 }
 
-func (r *queryResolver) ScrapeSingleScene(ctx context.Context, source models.ScraperSourceInput, input models.ScrapeSingleSceneInput) ([]*models.ScrapedScene, error) {
-	var ret []*models.ScrapedScene
+func (r *queryResolver) ScrapeSingleScene(ctx context.Context, source scraper.Source, input ScrapeSingleSceneInput) ([]*scraper.ScrapedScene, error) {
+	var ret []*scraper.ScrapedScene
 
 	var sceneID int
 	if input.SceneID != nil {
@@ -252,22 +252,22 @@ func (r *queryResolver) ScrapeSingleScene(ctx context.Context, source models.Scr
 	switch {
 	case source.ScraperID != nil:
 		var err error
-		var c models.ScrapedContent
-		var content []models.ScrapedContent
+		var c scraper.ScrapedContent
+		var content []scraper.ScrapedContent
 
 		switch {
 		case input.SceneID != nil:
-			c, err = r.scraperCache().ScrapeID(ctx, *source.ScraperID, sceneID, models.ScrapeContentTypeScene)
+			c, err = r.scraperCache().ScrapeID(ctx, *source.ScraperID, sceneID, scraper.ScrapeContentTypeScene)
 			if c != nil {
-				content = []models.ScrapedContent{c}
+				content = []scraper.ScrapedContent{c}
 			}
 		case input.SceneInput != nil:
 			c, err = r.scraperCache().ScrapeFragment(ctx, *source.ScraperID, scraper.Input{Scene: input.SceneInput})
 			if c != nil {
-				content = []models.ScrapedContent{c}
+				content = []scraper.ScrapedContent{c}
 			}
 		case input.Query != nil:
-			content, err = r.scraperCache().ScrapeName(ctx, *source.ScraperID, *input.Query, models.ScrapeContentTypeScene)
+			content, err = r.scraperCache().ScrapeName(ctx, *source.ScraperID, *input.Query, scraper.ScrapeContentTypeScene)
 		default:
 			err = fmt.Errorf("%w: scene_id, scene_input, or query must be set", ErrInput)
 		}
@@ -307,7 +307,7 @@ func (r *queryResolver) ScrapeSingleScene(ctx context.Context, source models.Scr
 	return ret, nil
 }
 
-func (r *queryResolver) ScrapeMultiScenes(ctx context.Context, source models.ScraperSourceInput, input models.ScrapeMultiScenesInput) ([][]*models.ScrapedScene, error) {
+func (r *queryResolver) ScrapeMultiScenes(ctx context.Context, source scraper.Source, input ScrapeMultiScenesInput) ([][]*scraper.ScrapedScene, error) {
 	if source.ScraperID != nil {
 		return nil, ErrNotImplemented
 	} else if source.StashBoxIndex != nil {
@@ -327,7 +327,7 @@ func (r *queryResolver) ScrapeMultiScenes(ctx context.Context, source models.Scr
 	return nil, errors.New("scraper_id or stash_box_index must be set")
 }
 
-func (r *queryResolver) ScrapeSinglePerformer(ctx context.Context, source models.ScraperSourceInput, input models.ScrapeSinglePerformerInput) ([]*models.ScrapedPerformer, error) {
+func (r *queryResolver) ScrapeSinglePerformer(ctx context.Context, source scraper.Source, input ScrapeSinglePerformerInput) ([]*models.ScrapedPerformer, error) {
 	if source.ScraperID != nil {
 		if input.PerformerInput != nil {
 			performer, err := r.scraperCache().ScrapeFragment(ctx, *source.ScraperID, scraper.Input{Performer: input.PerformerInput})
@@ -335,11 +335,11 @@ func (r *queryResolver) ScrapeSinglePerformer(ctx context.Context, source models
 				return nil, err
 			}
 
-			return marshalScrapedPerformers([]models.ScrapedContent{performer})
+			return marshalScrapedPerformers([]scraper.ScrapedContent{performer})
 		}
 
 		if input.Query != nil {
-			content, err := r.scraperCache().ScrapeName(ctx, *source.ScraperID, *input.Query, models.ScrapeContentTypePerformer)
+			content, err := r.scraperCache().ScrapeName(ctx, *source.ScraperID, *input.Query, scraper.ScrapeContentTypePerformer)
 			if err != nil {
 				return nil, err
 			}
@@ -354,7 +354,7 @@ func (r *queryResolver) ScrapeSinglePerformer(ctx context.Context, source models
 			return nil, err
 		}
 
-		var ret []*models.StashBoxPerformerQueryResult
+		var ret []*stashbox.StashBoxPerformerQueryResult
 		switch {
 		case input.PerformerID != nil:
 			ret, err = client.FindStashBoxPerformersByNames(ctx, []string{*input.PerformerID})
@@ -378,7 +378,7 @@ func (r *queryResolver) ScrapeSinglePerformer(ctx context.Context, source models
 	return nil, errors.New("scraper_id or stash_box_index must be set")
 }
 
-func (r *queryResolver) ScrapeMultiPerformers(ctx context.Context, source models.ScraperSourceInput, input models.ScrapeMultiPerformersInput) ([][]*models.ScrapedPerformer, error) {
+func (r *queryResolver) ScrapeMultiPerformers(ctx context.Context, source scraper.Source, input ScrapeMultiPerformersInput) ([][]*models.ScrapedPerformer, error) {
 	if source.ScraperID != nil {
 		return nil, ErrNotImplemented
 	} else if source.StashBoxIndex != nil {
@@ -393,7 +393,7 @@ func (r *queryResolver) ScrapeMultiPerformers(ctx context.Context, source models
 	return nil, errors.New("scraper_id or stash_box_index must be set")
 }
 
-func (r *queryResolver) ScrapeSingleGallery(ctx context.Context, source models.ScraperSourceInput, input models.ScrapeSingleGalleryInput) ([]*models.ScrapedGallery, error) {
+func (r *queryResolver) ScrapeSingleGallery(ctx context.Context, source scraper.Source, input ScrapeSingleGalleryInput) ([]*scraper.ScrapedGallery, error) {
 	if source.StashBoxIndex != nil {
 		return nil, ErrNotSupported
 	}
@@ -402,7 +402,7 @@ func (r *queryResolver) ScrapeSingleGallery(ctx context.Context, source models.S
 		return nil, fmt.Errorf("%w: scraper_id must be set", ErrInput)
 	}
 
-	var c models.ScrapedContent
+	var c scraper.ScrapedContent
 
 	switch {
 	case input.GalleryID != nil:
@@ -410,22 +410,22 @@ func (r *queryResolver) ScrapeSingleGallery(ctx context.Context, source models.S
 		if err != nil {
 			return nil, fmt.Errorf("%w: gallery id is not an integer: '%s'", ErrInput, *input.GalleryID)
 		}
-		c, err = r.scraperCache().ScrapeID(ctx, *source.ScraperID, galleryID, models.ScrapeContentTypeGallery)
+		c, err = r.scraperCache().ScrapeID(ctx, *source.ScraperID, galleryID, scraper.ScrapeContentTypeGallery)
 		if err != nil {
 			return nil, err
 		}
 
-		return marshalScrapedGalleries([]models.ScrapedContent{c})
+		return marshalScrapedGalleries([]scraper.ScrapedContent{c})
 	case input.GalleryInput != nil:
 		c, err := r.scraperCache().ScrapeFragment(ctx, *source.ScraperID, scraper.Input{Gallery: input.GalleryInput})
 		if err != nil {
 			return nil, err
 		}
 
-		return marshalScrapedGalleries([]models.ScrapedContent{c})
+		return marshalScrapedGalleries([]scraper.ScrapedContent{c})
 	default:
 		return nil, ErrNotImplemented
 	}
 }
 
-func (r *queryResolver) ScrapeSingleMovie(ctx context.Context, source models.ScraperSourceInput, input models.ScrapeSingleMovieInput) ([]*models.ScrapedMovie, error) {
+func (r *queryResolver) ScrapeSingleMovie(ctx context.Context, source scraper.Source, input ScrapeSingleMovieInput) ([]*models.ScrapedMovie, error) {
 	return nil, ErrNotSupported
 }
diff --git a/internal/api/resolver_subscription_job.go b/internal/api/resolver_subscription_job.go
index 8e2da6654..84ebee400 100644
--- a/internal/api/resolver_subscription_job.go
+++ b/internal/api/resolver_subscription_job.go
@@ -5,18 +5,17 @@ import (
 
 	"github.com/stashapp/stash/internal/manager"
 	"github.com/stashapp/stash/pkg/job"
-	"github.com/stashapp/stash/pkg/models"
 )
 
-func makeJobStatusUpdate(t models.JobStatusUpdateType, j job.Job) *models.JobStatusUpdate {
-	return &models.JobStatusUpdate{
+func makeJobStatusUpdate(t JobStatusUpdateType, j job.Job) *JobStatusUpdate {
+	return &JobStatusUpdate{
 		Type: t,
 		Job:  jobToJobModel(j),
 	}
 }
 
-func (r *subscriptionResolver) JobsSubscribe(ctx context.Context) (<-chan *models.JobStatusUpdate, error) {
-	msg := make(chan *models.JobStatusUpdate, 100)
+func (r *subscriptionResolver) JobsSubscribe(ctx context.Context) (<-chan *JobStatusUpdate, error) {
+	msg := make(chan *JobStatusUpdate, 100)
 
 	subscription := manager.GetInstance().JobManager.Subscribe(ctx)
 
@@ -24,11 +23,11 @@ func (r *subscriptionResolver) JobsSubscribe(ctx context.Context) (<-chan *model
 		for {
 			select {
 			case j := <-subscription.NewJob:
-				msg <- makeJobStatusUpdate(models.JobStatusUpdateTypeAdd, j)
+				msg <- makeJobStatusUpdate(JobStatusUpdateTypeAdd, j)
 			case j := <-subscription.RemovedJob:
-				msg <- makeJobStatusUpdate(models.JobStatusUpdateTypeRemove, j)
+				msg <- makeJobStatusUpdate(JobStatusUpdateTypeRemove, j)
 			case j := <-subscription.UpdatedJob:
-				msg <- makeJobStatusUpdate(models.JobStatusUpdateTypeUpdate, j)
+				msg <- makeJobStatusUpdate(JobStatusUpdateTypeUpdate, j)
 			case <-ctx.Done():
 				close(msg)
 				return
diff --git a/internal/api/resolver_subscription_logging.go b/internal/api/resolver_subscription_logging.go
index 17241e12b..423fa88af 100644
--- a/internal/api/resolver_subscription_logging.go
+++ b/internal/api/resolver_subscription_logging.go
@@ -5,33 +5,32 @@ import (
 
 	"github.com/stashapp/stash/internal/log"
 	"github.com/stashapp/stash/internal/manager"
-	"github.com/stashapp/stash/pkg/models"
 )
 
-func getLogLevel(logType string) models.LogLevel {
+func getLogLevel(logType string) LogLevel {
 	switch logType {
 	case "progress":
-		return models.LogLevelProgress
+		return LogLevelProgress
 	case "trace":
-		return models.LogLevelTrace
+		return LogLevelTrace
 	case "debug":
-		return models.LogLevelDebug
+		return LogLevelDebug
 	case "info":
-		return models.LogLevelInfo
+		return LogLevelInfo
 	case "warn":
-		return models.LogLevelWarning
+		return LogLevelWarning
 	case "error":
-		return models.LogLevelError
+		return LogLevelError
 	default:
-		return models.LogLevelDebug
+		return LogLevelDebug
 	}
 }
 
-func logEntriesFromLogItems(logItems []log.LogItem) []*models.LogEntry {
-	ret := make([]*models.LogEntry, len(logItems))
+func logEntriesFromLogItems(logItems []log.LogItem) []*LogEntry {
+	ret := make([]*LogEntry, len(logItems))
 
 	for i, entry := range logItems {
-		ret[i] = &models.LogEntry{
+		ret[i] = &LogEntry{
 			Time:    entry.Time,
 			Level:   getLogLevel(entry.Type),
 			Message: entry.Message,
@@ -41,8 +40,8 @@ func logEntriesFromLogItems(logItems []log.LogItem) []*models.LogEntry {
 	return ret
 }
 
-func (r *subscriptionResolver) LoggingSubscribe(ctx context.Context) (<-chan []*models.LogEntry, error) {
-	ret := make(chan []*models.LogEntry, 100)
+func (r *subscriptionResolver) LoggingSubscribe(ctx context.Context) (<-chan []*LogEntry, error) {
+	ret := make(chan []*LogEntry, 100)
 	stop := make(chan int, 1)
 	logger := manager.GetInstance().Logger
 	logSub := logger.SubscribeToLog(stop)
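Alongside the type moves (models.Job, models.LogEntry and friends become package-local Job, LogEntry), the two subscription resolvers above share the standard gqlgen shape: return a buffered channel, pump events into it from a goroutine, and close it when ctx.Done() fires on client disconnect. Stripped of stash's job and log types (string stands in for the payload), the skeleton looks like this:

    package subsketch

    import "context"

    // subscribe fans events out to the GraphQL layer. The buffered channel
    // keeps a slow client from blocking the pumping goroutine immediately.
    func subscribe(ctx context.Context, events <-chan string) <-chan string {
    	msg := make(chan string, 100)
    	go func() {
    		for {
    			select {
    			case e := <-events:
    				msg <- e
    			case <-ctx.Done():
    				close(msg) // tells the GraphQL layer the subscription ended
    				return
    			}
    		}
    	}()
    	return msg
    }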
diff --git a/internal/api/routes_image.go b/internal/api/routes_image.go
index 8ba2e50d5..2a3098bb5 100644
--- a/internal/api/routes_image.go
+++ b/internal/api/routes_image.go
@@ -9,21 +9,30 @@ import (
 	"github.com/go-chi/chi"
 	"github.com/stashapp/stash/internal/manager"
+	"github.com/stashapp/stash/pkg/file"
 	"github.com/stashapp/stash/pkg/fsutil"
 	"github.com/stashapp/stash/pkg/image"
 	"github.com/stashapp/stash/pkg/logger"
 	"github.com/stashapp/stash/pkg/models"
+	"github.com/stashapp/stash/pkg/txn"
 )
 
+type ImageFinder interface {
+	Find(ctx context.Context, id int) (*models.Image, error)
+	FindByChecksum(ctx context.Context, checksum string) ([]*models.Image, error)
+}
+
 type imageRoutes struct {
-	txnManager models.TransactionManager
+	txnManager  txn.Manager
+	imageFinder ImageFinder
+	fileFinder  file.Finder
 }
 
 func (rs imageRoutes) Routes() chi.Router {
 	r := chi.NewRouter()
 
 	r.Route("/{imageId}", func(r chi.Router) {
-		r.Use(ImageCtx)
+		r.Use(rs.ImageCtx)
 
 		r.Get("/image", rs.Image)
 		r.Get("/thumbnail", rs.Thumbnail)
@@ -45,12 +54,20 @@ func (rs imageRoutes) Thumbnail(w http.ResponseWriter, r *http.Request) {
 	if exists {
 		http.ServeFile(w, r, filepath)
 	} else {
+		// don't return anything if there is no file
+		f := img.Files.Primary()
+		if f == nil {
+			// TODO - probably want to return a placeholder
+			http.Error(w, http.StatusText(404), 404)
+			return
+		}
+
 		encoder := image.NewThumbnailEncoder(manager.GetInstance().FFMPEG)
-		data, err := encoder.GetThumbnail(img, models.DefaultGthumbWidth)
+		data, err := encoder.GetThumbnail(f, models.DefaultGthumbWidth)
 		if err != nil {
 			// don't log for unsupported image format
 			if !errors.Is(err, image.ErrNotSupportedForThumbnail) {
-				logger.Errorf("error generating thumbnail for image: %s", err.Error())
+				logger.Errorf("error generating thumbnail for %s: %v", f.Path, err)
 
 				var exitErr *exec.ExitError
 				if errors.As(err, &exitErr) {
@@ -80,23 +97,36 @@ func (rs imageRoutes) Image(w http.ResponseWriter, r *http.Request) {
 	i := r.Context().Value(imageKey).(*models.Image)
 
 	// if image is in a zip file, we need to serve it specifically
-	image.Serve(w, r, i.Path)
+
+	if i.Files.Primary() == nil {
+		http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
+		return
+	}
+
+	i.Files.Primary().Serve(&file.OsFS{}, w, r)
 }
 
 // endregion
 
-func ImageCtx(next http.Handler) http.Handler {
+func (rs imageRoutes) ImageCtx(next http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		imageIdentifierQueryParam := chi.URLParam(r, "imageId")
 		imageID, _ := strconv.Atoi(imageIdentifierQueryParam)
 
 		var image *models.Image
-		readTxnErr := manager.GetInstance().TxnManager.WithReadTxn(r.Context(), func(repo models.ReaderRepository) error {
-			qb := repo.Image()
+		readTxnErr := txn.WithTxn(r.Context(), rs.txnManager, func(ctx context.Context) error {
+			qb := rs.imageFinder
 			if imageID == 0 {
-				image, _ = qb.FindByChecksum(imageIdentifierQueryParam)
+				images, _ := qb.FindByChecksum(ctx, imageIdentifierQueryParam)
+				if len(images) > 0 {
+					image = images[0]
+				}
 			} else {
-				image, _ = qb.Find(imageID)
+				image, _ = qb.Find(ctx, imageID)
+			}
+
+			if image != nil {
+				_ = image.LoadPrimaryFile(ctx, rs.fileFinder)
 			}
 
 			return nil
diff --git a/internal/api/routes_movie.go b/internal/api/routes_movie.go
index 439b1e4d3..8fbccdb53 100644
--- a/internal/api/routes_movie.go
+++ b/internal/api/routes_movie.go
@@ -6,21 +6,28 @@ import (
 	"strconv"
 
 	"github.com/go-chi/chi"
-	"github.com/stashapp/stash/internal/manager"
 	"github.com/stashapp/stash/pkg/logger"
 	"github.com/stashapp/stash/pkg/models"
+	"github.com/stashapp/stash/pkg/txn"
 	"github.com/stashapp/stash/pkg/utils"
 )
 
+type MovieFinder interface {
+	GetFrontImage(ctx context.Context, movieID int) ([]byte, error)
+	GetBackImage(ctx context.Context, movieID int) ([]byte, error)
+	Find(ctx context.Context, id int) (*models.Movie, error)
+}
+
 type movieRoutes struct {
-	txnManager models.TransactionManager
+	txnManager  txn.Manager
+	movieFinder MovieFinder
 }
 
 func (rs movieRoutes) Routes() chi.Router {
 	r := chi.NewRouter()
 
 	r.Route("/{movieId}", func(r chi.Router) {
-		r.Use(MovieCtx)
+		r.Use(rs.MovieCtx)
 
 		r.Get("/frontimage", rs.FrontImage)
 		r.Get("/backimage", rs.BackImage)
 	})
@@ -33,8 +40,8 @@ func (rs movieRoutes) FrontImage(w http.ResponseWriter, r *http.Request) {
 	defaultParam := r.URL.Query().Get("default")
 	var image []byte
 	if defaultParam != "true" {
-		err := rs.txnManager.WithReadTxn(r.Context(), func(repo models.ReaderRepository) error {
-			image, _ = repo.Movie().GetFrontImage(movie.ID)
+		err := txn.WithTxn(r.Context(), rs.txnManager, func(ctx context.Context) error {
+			image, _ = rs.movieFinder.GetFrontImage(ctx, movie.ID)
 			return nil
 		})
 		if err != nil {
@@ -56,8 +63,8 @@ func (rs movieRoutes) BackImage(w http.ResponseWriter, r *http.Request) {
 	defaultParam := r.URL.Query().Get("default")
 	var image []byte
 	if defaultParam != "true" {
-		err := rs.txnManager.WithReadTxn(r.Context(), func(repo models.ReaderRepository) error {
-			image, _ = repo.Movie().GetBackImage(movie.ID)
+		err := txn.WithTxn(r.Context(), rs.txnManager, func(ctx context.Context) error {
+			image, _ = rs.movieFinder.GetBackImage(ctx, movie.ID)
 			return nil
 		})
 		if err != nil {
@@ -74,7 +81,7 @@ func (rs movieRoutes) BackImage(w http.ResponseWriter, r *http.Request) {
 	}
 }
 
-func MovieCtx(next http.Handler) http.Handler {
+func (rs movieRoutes) MovieCtx(next http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		movieID, err := strconv.Atoi(chi.URLParam(r, "movieId"))
 		if err != nil {
@@ -83,9 +90,9 @@ func MovieCtx(next http.Handler) http.Handler {
 		}
 
 		var movie *models.Movie
-		if err := manager.GetInstance().TxnManager.WithReadTxn(r.Context(), func(repo models.ReaderRepository) error {
+		if err := txn.WithTxn(r.Context(), rs.txnManager, func(ctx context.Context) error {
 			var err error
-			movie, err = repo.Movie().Find(movieID)
+			movie, err = rs.movieFinder.Find(ctx, movieID)
 			return err
 		}); err != nil {
 			http.Error(w, http.StatusText(404), 404)
diff --git a/internal/api/routes_performer.go b/internal/api/routes_performer.go
index e5c0bb862..15ad3c743 100644
--- a/internal/api/routes_performer.go
+++ b/internal/api/routes_performer.go
@@ -6,22 +6,28 @@ import (
 	"strconv"
 
 	"github.com/go-chi/chi"
-	"github.com/stashapp/stash/internal/manager"
 	"github.com/stashapp/stash/internal/manager/config"
 	"github.com/stashapp/stash/pkg/logger"
 	"github.com/stashapp/stash/pkg/models"
+	"github.com/stashapp/stash/pkg/txn"
 	"github.com/stashapp/stash/pkg/utils"
 )
 
+type PerformerFinder interface {
+	Find(ctx context.Context, id int) (*models.Performer, error)
+	GetImage(ctx context.Context, performerID int) ([]byte, error)
+}
+
 type performerRoutes struct {
-	txnManager models.TransactionManager
+	txnManager      txn.Manager
+	performerFinder PerformerFinder
 }
 
 func (rs performerRoutes) Routes() chi.Router {
 	r := chi.NewRouter()
 
 	r.Route("/{performerId}", func(r chi.Router) {
-		r.Use(PerformerCtx)
+		r.Use(rs.PerformerCtx)
 
 		r.Get("/image", rs.Image)
 	})
@@ -34,8 +40,8 @@ func (rs performerRoutes) Image(w http.ResponseWriter, r *http.Request) {
 	var image []byte
 
 	if defaultParam != "true" {
-		readTxnErr := rs.txnManager.WithReadTxn(r.Context(), func(repo models.ReaderRepository) error {
-			image, _ = repo.Performer().GetImage(performer.ID)
+		readTxnErr := txn.WithTxn(r.Context(), rs.txnManager, func(ctx context.Context) error {
+			image, _ = rs.performerFinder.GetImage(ctx, performer.ID)
 			return nil
 		})
 		if readTxnErr != nil {
@@ -52,7 +58,7 @@ func (rs performerRoutes) Image(w http.ResponseWriter, r *http.Request) {
 	}
 }
 
-func PerformerCtx(next http.Handler) http.Handler {
+func (rs performerRoutes) PerformerCtx(next http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		performerID, err := strconv.Atoi(chi.URLParam(r, "performerId"))
 		if err != nil {
@@ -61,9 +67,9 @@ func PerformerCtx(next http.Handler) http.Handler {
 		}
 
 		var performer *models.Performer
-		if err := manager.GetInstance().TxnManager.WithReadTxn(r.Context(), func(repo models.ReaderRepository) error {
+		if err := txn.WithTxn(r.Context(), rs.txnManager, func(ctx context.Context) error {
 			var err error
-			performer, err = repo.Performer().Find(performerID)
+			performer, err = rs.performerFinder.Find(ctx, performerID)
 			return err
 		}); err != nil {
 			http.Error(w, http.StatusText(404), 404)
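routes_image.go, routes_movie.go and routes_performer.go all make the same move: the route struct carries an injected txn.Manager plus a consumer-defined interface listing only the queries that file performs, instead of reaching for manager.GetInstance(). The payoff is testability — any value with the right method set satisfies the interface. A self-contained illustration of that design choice (all names below are invented for the example):

    package routesketch

    import "context"

    type Performer struct{ ID int }

    // The route declares only the two calls it makes...
    type performerFinder interface {
    	Find(ctx context.Context, id int) (*Performer, error)
    	GetImage(ctx context.Context, performerID int) ([]byte, error)
    }

    // ...so any store with those methods can be injected, including a test fake.
    type fakeStore struct{}

    func (fakeStore) Find(ctx context.Context, id int) (*Performer, error) {
    	return &Performer{ID: id}, nil
    }

    func (fakeStore) GetImage(ctx context.Context, performerID int) ([]byte, error) {
    	return nil, nil
    }

    var _ performerFinder = fakeStore{} // compile-time interface check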
diff --git a/internal/api/routes_scene.go b/internal/api/routes_scene.go
index 3612da72d..d6fb0847f 100644
--- a/internal/api/routes_scene.go
+++ b/internal/api/routes_scene.go
@@ -11,22 +11,47 @@ import (
 
 	"github.com/stashapp/stash/internal/manager"
 	"github.com/stashapp/stash/internal/manager/config"
 	"github.com/stashapp/stash/pkg/ffmpeg"
+	"github.com/stashapp/stash/pkg/file"
+	"github.com/stashapp/stash/pkg/file/video"
 	"github.com/stashapp/stash/pkg/fsutil"
 	"github.com/stashapp/stash/pkg/logger"
 	"github.com/stashapp/stash/pkg/models"
 	"github.com/stashapp/stash/pkg/scene"
+	"github.com/stashapp/stash/pkg/txn"
 	"github.com/stashapp/stash/pkg/utils"
 )
 
+type SceneFinder interface {
+	manager.SceneCoverGetter
+
+	scene.IDFinder
+	FindByChecksum(ctx context.Context, checksum string) ([]*models.Scene, error)
+	FindByOSHash(ctx context.Context, oshash string) ([]*models.Scene, error)
+}
+
+type SceneMarkerFinder interface {
+	Find(ctx context.Context, id int) (*models.SceneMarker, error)
+	FindBySceneID(ctx context.Context, sceneID int) ([]*models.SceneMarker, error)
+}
+
+type CaptionFinder interface {
+	GetCaptions(ctx context.Context, fileID file.ID) ([]*models.VideoCaption, error)
+}
+
 type sceneRoutes struct {
-	txnManager models.TransactionManager
+	txnManager        txn.Manager
+	sceneFinder       SceneFinder
+	fileFinder        file.Finder
+	captionFinder     CaptionFinder
+	sceneMarkerFinder SceneMarkerFinder
+	tagFinder         scene.MarkerTagFinder
 }
 
 func (rs sceneRoutes) Routes() chi.Router {
 	r := chi.NewRouter()
 
 	r.Route("/{sceneId}", func(r chi.Router) {
-		r.Use(SceneCtx)
+		r.Use(rs.SceneCtx)
 
 		// streaming endpoints
 		r.Get("/stream", rs.StreamDirect)
@@ -48,8 +73,8 @@ func (rs sceneRoutes) Routes() chi.Router {
 		r.Get("/scene_marker/{sceneMarkerId}/preview", rs.SceneMarkerPreview)
 		r.Get("/scene_marker/{sceneMarkerId}/screenshot", rs.SceneMarkerScreenshot)
 	})
-	r.With(SceneCtx).Get("/{sceneId}_thumbs.vtt", rs.VttThumbs)
-	r.With(SceneCtx).Get("/{sceneId}_sprite.jpg", rs.VttSprite)
+	r.With(rs.SceneCtx).Get("/{sceneId}_thumbs.vtt", rs.VttThumbs)
+	r.With(rs.SceneCtx).Get("/{sceneId}_sprite.jpg", rs.VttSprite)
 
 	return r
 }
@@ -60,7 +85,8 @@ func (rs sceneRoutes) StreamDirect(w http.ResponseWriter, r *http.Request) {
 	scene := r.Context().Value(sceneKey).(*models.Scene)
 
 	ss := manager.SceneServer{
-		TXNManager: rs.txnManager,
+		TxnManager:       rs.txnManager,
+		SceneCoverGetter: rs.sceneFinder,
 	}
 	ss.StreamSceneDirect(scene, w, r)
 }
@@ -69,7 +95,12 @@ func (rs sceneRoutes) StreamMKV(w http.ResponseWriter, r *http.Request) {
 	// only allow mkv streaming if the scene container is an mkv already
 	scene := r.Context().Value(sceneKey).(*models.Scene)
 
-	container, err := manager.GetSceneFileContainer(scene)
+	pf := scene.Files.Primary()
+	if pf == nil {
+		return
+	}
+
+	container, err := manager.GetVideoFileContainer(pf)
 	if err != nil {
 		logger.Errorf("[transcode] error getting container: %v", err)
 	}
@@ -96,10 +127,8 @@ func (rs sceneRoutes) StreamMp4(w http.ResponseWriter, r *http.Request) {
 func (rs sceneRoutes) StreamHLS(w http.ResponseWriter, r *http.Request) {
 	scene := r.Context().Value(sceneKey).(*models.Scene)
 
-	ffprobe := manager.GetInstance().FFProbe
-	videoFile, err := ffprobe.NewVideoFile(scene.Path)
-	if err != nil {
-		logger.Errorf("[stream] error reading video file: %v", err)
+	pf := scene.Files.Primary()
+	if pf == nil {
 		return
 	}
 
@@ -109,7 +138,7 @@ func (rs sceneRoutes) StreamHLS(w http.ResponseWriter, r *http.Request) {
 	w.Header().Set("Content-Type", ffmpeg.MimeHLS)
 
 	var str strings.Builder
-	ffmpeg.WriteHLSPlaylist(videoFile.Duration, r.URL.String(), &str)
+	ffmpeg.WriteHLSPlaylist(pf.Duration, r.URL.String(), &str)
 
 	requestByteRange := createByteRange(r.Header.Get("Range"))
 	if requestByteRange.RawString != "" {
@@ -130,9 +159,14 @@ func (rs sceneRoutes) StreamTS(w http.ResponseWriter, r *http.Request) {
 }
 
 func (rs sceneRoutes) streamTranscode(w http.ResponseWriter, r *http.Request, streamFormat ffmpeg.StreamFormat) {
-	logger.Debugf("Streaming as %s", streamFormat.MimeType)
 	scene := r.Context().Value(sceneKey).(*models.Scene)
 
+	f := scene.Files.Primary()
+	if f == nil {
+		return
+	}
+
+	logger.Debugf("Streaming as %s", streamFormat.MimeType)
+
 	// start stream based on query param, if provided
 	if err := r.ParseForm(); err != nil {
 		logger.Warnf("[stream] error parsing query form: %v", err)
@@ -143,17 +177,20 @@ func (rs sceneRoutes) streamTranscode(w http.ResponseWriter, r *http.Request, st
 	requestedSize := r.Form.Get("resolution")
 
 	audioCodec := ffmpeg.MissingUnsupported
-	if scene.AudioCodec.Valid {
-		audioCodec = ffmpeg.ProbeAudioCodec(scene.AudioCodec.String)
+	if f.AudioCodec != "" {
+		audioCodec = ffmpeg.ProbeAudioCodec(f.AudioCodec)
 	}
 
+	width := f.Width
+	height := f.Height
+
 	options := ffmpeg.TranscodeStreamOptions{
-		Input:       scene.Path,
+		Input:       f.Path,
 		Codec:       streamFormat,
 		VideoOnly:   audioCodec == ffmpeg.MissingUnsupported,
-		VideoWidth:  int(scene.Width.Int64),
-		VideoHeight: int(scene.Height.Int64),
+		VideoWidth:  width,
+		VideoHeight: height,
 
 		StartTime:        ss,
 		MaxTranscodeSize: config.GetInstance().GetMaxStreamingTranscodeSize().GetMaxResolution(),
@@ -167,7 +204,7 @@ func (rs sceneRoutes) streamTranscode(w http.ResponseWriter, r *http.Request, st
 	lm := manager.GetInstance().ReadLockManager
 	streamRequestCtx := manager.NewStreamRequestContext(w, r)
-	lockCtx := lm.ReadLock(streamRequestCtx, scene.Path)
+	lockCtx := lm.ReadLock(streamRequestCtx, f.Path)
 	defer lockCtx.Cancel()
 
 	stream, err := encoder.GetTranscodeStream(lockCtx, options)
@@ -190,7 +227,8 @@ func (rs sceneRoutes) Screenshot(w http.ResponseWriter, r *http.Request) {
 	scene := r.Context().Value(sceneKey).(*models.Scene)
 
 	ss := manager.SceneServer{
-		TXNManager: rs.txnManager,
+		TxnManager:       rs.txnManager,
+		SceneCoverGetter: rs.sceneFinder,
 	}
 	ss.ServeScreenshot(scene, w, r)
 }
@@ -221,16 +259,16 @@ func (rs sceneRoutes) getChapterVttTitle(ctx context.Context, marker *models.Sce
 	}
 
 	var ret string
-	if err := rs.txnManager.WithReadTxn(ctx, func(repo models.ReaderRepository) error {
-		qb := repo.Tag()
-		primaryTag, err := qb.Find(marker.PrimaryTagID)
+	if err := txn.WithTxn(ctx, rs.txnManager, func(ctx context.Context) error {
+		qb := rs.tagFinder
+		primaryTag, err := qb.Find(ctx, marker.PrimaryTagID)
 		if err != nil {
 			return err
 		}
 
 		ret = primaryTag.Name
 
-		tags, err := qb.FindBySceneMarkerID(marker.ID)
+		tags, err := qb.FindBySceneMarkerID(ctx, marker.ID)
 		if err != nil {
 			return err
 		}
@@ -250,9 +288,9 @@ func (rs sceneRoutes) getChapterVttTitle(ctx context.Context, marker *models.Sce
 func (rs sceneRoutes) ChapterVtt(w http.ResponseWriter, r *http.Request) {
 	scene := r.Context().Value(sceneKey).(*models.Scene)
 	var sceneMarkers []*models.SceneMarker
-	if err := rs.txnManager.WithReadTxn(r.Context(), func(repo models.ReaderRepository) error {
+	if err := txn.WithTxn(r.Context(), rs.txnManager, func(ctx context.Context) error {
 		var err error
-		sceneMarkers, err = repo.SceneMarker().FindBySceneID(scene.ID)
+		sceneMarkers, err = rs.sceneMarkerFinder.FindBySceneID(ctx, scene.ID)
 		return err
 	}); err != nil {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
@@ -275,7 +313,7 @@ func (rs sceneRoutes) ChapterVtt(w http.ResponseWriter, r *http.Request) {
 
 func (rs sceneRoutes) Funscript(w http.ResponseWriter, r *http.Request) {
 	s := r.Context().Value(sceneKey).(*models.Scene)
-	funscript := scene.GetFunscriptPath(s.Path)
+	funscript := video.GetFunscriptPath(s.Path)
 	serveFileNoCache(w, r, funscript)
 }
 
@@ -289,12 +327,17 @@ func (rs sceneRoutes) InteractiveHeatmap(w http.ResponseWriter, r *http.Request)
 
 func (rs sceneRoutes) Caption(w http.ResponseWriter, r *http.Request, lang string, ext string) {
 	s := r.Context().Value(sceneKey).(*models.Scene)
 
-	if err := rs.txnManager.WithReadTxn(r.Context(), func(repo models.ReaderRepository) error {
+	if err := txn.WithTxn(r.Context(), rs.txnManager, func(ctx context.Context) error {
 		var err error
-		captions, err := repo.Scene().GetCaptions(s.ID)
+		primaryFile := s.Files.Primary()
+		if primaryFile == nil {
+			return nil
+		}
+
+		captions, err := rs.captionFinder.GetCaptions(ctx, primaryFile.Base().ID)
 
 		for _, caption := range captions {
 			if lang == caption.LanguageCode && ext == caption.CaptionType {
-				sub, err := scene.ReadSubs(caption.Path(s.Path))
+				sub, err := video.ReadSubs(caption.Path(s.Path))
 				if err == nil {
 					var b bytes.Buffer
 					err = sub.WriteToWebVTT(&b)
@@ -344,9 +387,9 @@ func (rs sceneRoutes) SceneMarkerStream(w http.ResponseWriter, r *http.Request)
 	scene := r.Context().Value(sceneKey).(*models.Scene)
 	sceneMarkerID, _ := strconv.Atoi(chi.URLParam(r, "sceneMarkerId"))
 	var sceneMarker *models.SceneMarker
-	if err := rs.txnManager.WithReadTxn(r.Context(), func(repo models.ReaderRepository) error {
+	if err := txn.WithTxn(r.Context(), rs.txnManager, func(ctx context.Context) error {
 		var err error
-		sceneMarker, err = repo.SceneMarker().Find(sceneMarkerID)
+		sceneMarker, err = rs.sceneMarkerFinder.Find(ctx, sceneMarkerID)
 		return err
 	}); err != nil {
 		logger.Warnf("Error when getting scene marker for stream: %s", err.Error())
@@ -367,9 +410,9 @@ func (rs sceneRoutes) SceneMarkerPreview(w http.ResponseWriter, r *http.Request)
 	scene := r.Context().Value(sceneKey).(*models.Scene)
 	sceneMarkerID, _ := strconv.Atoi(chi.URLParam(r, "sceneMarkerId"))
 	var sceneMarker *models.SceneMarker
-	if err := rs.txnManager.WithReadTxn(r.Context(), func(repo models.ReaderRepository) error {
+	if err := txn.WithTxn(r.Context(), rs.txnManager, func(ctx context.Context) error {
 		var err error
-		sceneMarker, err = repo.SceneMarker().Find(sceneMarkerID)
+		sceneMarker, err = rs.sceneMarkerFinder.Find(ctx, sceneMarkerID)
 		return err
 	}); err != nil {
 		logger.Warnf("Error when getting scene marker for stream: %s", err.Error())
@@ -400,9 +443,9 @@ func (rs sceneRoutes) SceneMarkerScreenshot(w http.ResponseWriter, r *http.Reque
 	scene := r.Context().Value(sceneKey).(*models.Scene)
 	sceneMarkerID, _ := strconv.Atoi(chi.URLParam(r, "sceneMarkerId"))
 	var sceneMarker *models.SceneMarker
-	if err := rs.txnManager.WithReadTxn(r.Context(), func(repo models.ReaderRepository) error {
+	if err := txn.WithTxn(r.Context(), rs.txnManager, func(ctx context.Context) error {
 		var err error
-		sceneMarker, err = repo.SceneMarker().Find(sceneMarkerID)
+		sceneMarker, err = rs.sceneMarkerFinder.Find(ctx, sceneMarkerID)
 		return err
 	}); err != nil {
 		logger.Warnf("Error when getting scene marker for stream: %s", err.Error())
@@ -431,23 +474,33 @@ func (rs sceneRoutes) SceneMarkerScreenshot(w http.ResponseWriter, r *http.Reque
 
 // endregion
 
-func SceneCtx(next http.Handler) http.Handler {
+func (rs sceneRoutes) SceneCtx(next http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		sceneIdentifierQueryParam := chi.URLParam(r, "sceneId")
 		sceneID, _ := strconv.Atoi(sceneIdentifierQueryParam)
 
 		var scene *models.Scene
-		readTxnErr := manager.GetInstance().TxnManager.WithReadTxn(r.Context(), func(repo models.ReaderRepository) error {
-			qb := repo.Scene()
+		readTxnErr := txn.WithTxn(r.Context(), rs.txnManager, func(ctx context.Context) error {
+			qb := rs.sceneFinder
 			if sceneID == 0 {
+				var scenes []*models.Scene
 				// determine checksum/os by the length of the query param
 				if len(sceneIdentifierQueryParam) == 32 {
-					scene, _ = qb.FindByChecksum(sceneIdentifierQueryParam)
+					scenes, _ = qb.FindByChecksum(ctx, sceneIdentifierQueryParam)
+
 				} else {
-					scene, _ = qb.FindByOSHash(sceneIdentifierQueryParam)
+					scenes, _ = qb.FindByOSHash(ctx, sceneIdentifierQueryParam)
+				}
+
+				if len(scenes) > 0 {
+					scene = scenes[0]
 				}
 			} else {
-				scene, _ = qb.Find(sceneID)
+				scene, _ = qb.Find(ctx, sceneID)
+			}
+
+			if scene != nil {
+				_ = scene.LoadPrimaryFile(ctx, rs.fileFinder)
 			}
 
 			return nil
diff --git a/internal/api/routes_studio.go b/internal/api/routes_studio.go
index 18f78b30c..e26499f04 100644
--- a/internal/api/routes_studio.go
+++ b/internal/api/routes_studio.go
@@ -8,21 +8,28 @@ import (
 	"syscall"
 
 	"github.com/go-chi/chi"
-	"github.com/stashapp/stash/internal/manager"
 	"github.com/stashapp/stash/pkg/logger"
 	"github.com/stashapp/stash/pkg/models"
+	"github.com/stashapp/stash/pkg/studio"
+	"github.com/stashapp/stash/pkg/txn"
 	"github.com/stashapp/stash/pkg/utils"
 )
 
+type StudioFinder interface {
+	studio.Finder
+	GetImage(ctx context.Context, studioID int) ([]byte, error)
+}
+
 type studioRoutes struct {
-	txnManager models.TransactionManager
+	txnManager   txn.Manager
+	studioFinder StudioFinder
 }
 
 func (rs studioRoutes) Routes() chi.Router {
 	r := chi.NewRouter()
 
 	r.Route("/{studioId}", func(r chi.Router) {
-		r.Use(StudioCtx)
+		r.Use(rs.StudioCtx)
 
 		r.Get("/image", rs.Image)
 	})
@@ -35,8 +42,8 @@ func (rs studioRoutes) Image(w http.ResponseWriter, r *http.Request) {
 	var image []byte
 
 	if defaultParam != "true" {
-		err := rs.txnManager.WithReadTxn(r.Context(), func(repo models.ReaderRepository) error {
-			image, _ = repo.Studio().GetImage(studio.ID)
+		err := txn.WithTxn(r.Context(), rs.txnManager, func(ctx context.Context) error {
+			image, _ = rs.studioFinder.GetImage(ctx, studio.ID)
 			return nil
 		})
 		if err != nil {
@@ -58,7 +65,7 @@ func (rs studioRoutes) Image(w http.ResponseWriter, r *http.Request) {
 	}
 }
 
-func StudioCtx(next http.Handler) http.Handler {
+func (rs studioRoutes) StudioCtx(next http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		studioID, err := strconv.Atoi(chi.URLParam(r, "studioId"))
 		if err != nil {
@@ -67,9 +74,9 @@ func StudioCtx(next http.Handler) http.Handler {
 		}
 
 		var studio *models.Studio
-		if err := manager.GetInstance().TxnManager.WithReadTxn(r.Context(), func(repo models.ReaderRepository) error {
+		if err := txn.WithTxn(r.Context(), rs.txnManager, func(ctx context.Context) error {
 			var err error
-			studio, err = repo.Studio().Find(studioID)
+			studio, err = rs.studioFinder.Find(ctx, studioID)
 			return err
 		}); err != nil {
 			http.Error(w, http.StatusText(404), 404)
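SceneCtx and StudioCtx above, like ImageCtx and MovieCtx before them, turn from package-level functions into methods so the middleware can reach the injected finder rather than the global manager singleton. Boiled down, the chi middleware pattern they all share looks like the following sketch (entity, finder and the {id} parameter are placeholders, not names from this PR):

    package routesketch

    import (
    	"context"
    	"net/http"
    	"strconv"

    	"github.com/go-chi/chi"
    )

    type entity struct{ ID int }

    type finder interface {
    	Find(ctx context.Context, id int) (*entity, error)
    }

    type routes struct{ finder finder }

    type ctxKey int

    const entityKey ctxKey = 0

    // Ctx resolves {id}, loads the entity through the injected finder, and
    // stores it in the request context for downstream handlers to retrieve
    // with r.Context().Value(entityKey).
    func (rs routes) Ctx(next http.Handler) http.Handler {
    	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    		id, err := strconv.Atoi(chi.URLParam(r, "id"))
    		if err != nil {
    			http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
    			return
    		}
    		e, err := rs.finder.Find(r.Context(), id)
    		if err != nil || e == nil {
    			http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
    			return
    		}
    		ctx := context.WithValue(r.Context(), entityKey, e)
    		next.ServeHTTP(w, r.WithContext(ctx))
    	})
    }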
"github.com/stashapp/stash/pkg/tag" + "github.com/stashapp/stash/pkg/txn" "github.com/stashapp/stash/pkg/utils" ) +type TagFinder interface { + tag.Finder + GetImage(ctx context.Context, tagID int) ([]byte, error) +} + type tagRoutes struct { - txnManager models.TransactionManager + txnManager txn.Manager + tagFinder TagFinder } func (rs tagRoutes) Routes() chi.Router { r := chi.NewRouter() r.Route("/{tagId}", func(r chi.Router) { - r.Use(TagCtx) + r.Use(rs.TagCtx) r.Get("/image", rs.Image) }) @@ -33,8 +40,8 @@ func (rs tagRoutes) Image(w http.ResponseWriter, r *http.Request) { var image []byte if defaultParam != "true" { - err := rs.txnManager.WithReadTxn(r.Context(), func(repo models.ReaderRepository) error { - image, _ = repo.Tag().GetImage(tag.ID) + err := txn.WithTxn(r.Context(), rs.txnManager, func(ctx context.Context) error { + image, _ = rs.tagFinder.GetImage(ctx, tag.ID) return nil }) if err != nil { @@ -51,7 +58,7 @@ func (rs tagRoutes) Image(w http.ResponseWriter, r *http.Request) { } } -func TagCtx(next http.Handler) http.Handler { +func (rs tagRoutes) TagCtx(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { tagID, err := strconv.Atoi(chi.URLParam(r, "tagId")) if err != nil { @@ -60,9 +67,9 @@ func TagCtx(next http.Handler) http.Handler { } var tag *models.Tag - if err := manager.GetInstance().TxnManager.WithReadTxn(r.Context(), func(repo models.ReaderRepository) error { + if err := txn.WithTxn(r.Context(), rs.txnManager, func(ctx context.Context) error { var err error - tag, err = repo.Tag().Find(tagID) + tag, err = rs.tagFinder.Find(ctx, tagID) return err }); err != nil { http.Error(w, http.StatusText(404), 404) diff --git a/internal/api/scraped_content.go b/internal/api/scraped_content.go index 46664724a..6d9003892 100644 --- a/internal/api/scraped_content.go +++ b/internal/api/scraped_content.go @@ -4,12 +4,13 @@ import ( "fmt" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/scraper" ) // marshalScrapedScenes converts ScrapedContent into ScrapedScene. If conversion fails, an // error is returned to the caller. -func marshalScrapedScenes(content []models.ScrapedContent) ([]*models.ScrapedScene, error) { - var ret []*models.ScrapedScene +func marshalScrapedScenes(content []scraper.ScrapedContent) ([]*scraper.ScrapedScene, error) { + var ret []*scraper.ScrapedScene for _, c := range content { if c == nil { // graphql schema requires scenes to be non-nil @@ -17,9 +18,9 @@ func marshalScrapedScenes(content []models.ScrapedContent) ([]*models.ScrapedSce } switch s := c.(type) { - case *models.ScrapedScene: + case *scraper.ScrapedScene: ret = append(ret, s) - case models.ScrapedScene: + case scraper.ScrapedScene: ret = append(ret, &s) default: return nil, fmt.Errorf("%w: cannot turn ScrapedContent into ScrapedScene", models.ErrConversion) @@ -31,7 +32,7 @@ func marshalScrapedScenes(content []models.ScrapedContent) ([]*models.ScrapedSce // marshalScrapedPerformers converts ScrapedContent into ScrapedPerformer. If conversion // fails, an error is returned to the caller. 
-func marshalScrapedPerformers(content []models.ScrapedContent) ([]*models.ScrapedPerformer, error) { +func marshalScrapedPerformers(content []scraper.ScrapedContent) ([]*models.ScrapedPerformer, error) { var ret []*models.ScrapedPerformer for _, c := range content { if c == nil { @@ -54,8 +55,8 @@ func marshalScrapedPerformers(content []models.ScrapedContent) ([]*models.Scrape // marshalScrapedGalleries converts ScrapedContent into ScrapedGallery. If // conversion fails, an error is returned. -func marshalScrapedGalleries(content []models.ScrapedContent) ([]*models.ScrapedGallery, error) { - var ret []*models.ScrapedGallery +func marshalScrapedGalleries(content []scraper.ScrapedContent) ([]*scraper.ScrapedGallery, error) { + var ret []*scraper.ScrapedGallery for _, c := range content { if c == nil { // graphql schema requires galleries to be non-nil @@ -63,9 +64,9 @@ func marshalScrapedGalleries(content []models.ScrapedContent) ([]*models.Scraped } switch g := c.(type) { - case *models.ScrapedGallery: + case *scraper.ScrapedGallery: ret = append(ret, g) - case models.ScrapedGallery: + case scraper.ScrapedGallery: ret = append(ret, &g) default: return nil, fmt.Errorf("%w: cannot turn ScrapedContent into ScrapedGallery", models.ErrConversion) @@ -77,7 +78,7 @@ func marshalScrapedGalleries(content []models.ScrapedContent) ([]*models.Scraped // marshalScrapedMovies converts ScrapedContent into ScrapedMovie. If conversion // fails, an error is returned. -func marshalScrapedMovies(content []models.ScrapedContent) ([]*models.ScrapedMovie, error) { +func marshalScrapedMovies(content []scraper.ScrapedContent) ([]*models.ScrapedMovie, error) { var ret []*models.ScrapedMovie for _, c := range content { if c == nil { @@ -99,8 +100,8 @@ func marshalScrapedMovies(content []models.ScrapedContent) ([]*models.ScrapedMov } // marshalScrapedPerformer will marshal a single performer -func marshalScrapedPerformer(content models.ScrapedContent) (*models.ScrapedPerformer, error) { - p, err := marshalScrapedPerformers([]models.ScrapedContent{content}) +func marshalScrapedPerformer(content scraper.ScrapedContent) (*models.ScrapedPerformer, error) { + p, err := marshalScrapedPerformers([]scraper.ScrapedContent{content}) if err != nil { return nil, err } @@ -109,8 +110,8 @@ func marshalScrapedPerformer(content models.ScrapedContent) (*models.ScrapedPerf } // marshalScrapedScene will marshal a single scraped scene -func marshalScrapedScene(content models.ScrapedContent) (*models.ScrapedScene, error) { - s, err := marshalScrapedScenes([]models.ScrapedContent{content}) +func marshalScrapedScene(content scraper.ScrapedContent) (*scraper.ScrapedScene, error) { + s, err := marshalScrapedScenes([]scraper.ScrapedContent{content}) if err != nil { return nil, err } @@ -119,8 +120,8 @@ func marshalScrapedScene(content models.ScrapedContent) (*models.ScrapedScene, e } // marshalScrapedGallery will marshal a single scraped gallery -func marshalScrapedGallery(content models.ScrapedContent) (*models.ScrapedGallery, error) { - g, err := marshalScrapedGalleries([]models.ScrapedContent{content}) +func marshalScrapedGallery(content scraper.ScrapedContent) (*scraper.ScrapedGallery, error) { + g, err := marshalScrapedGalleries([]scraper.ScrapedContent{content}) if err != nil { return nil, err } @@ -129,8 +130,8 @@ func marshalScrapedGallery(content models.ScrapedContent) (*models.ScrapedGaller } // marshalScrapedMovie will marshal a single scraped movie -func marshalScrapedMovie(content models.ScrapedContent) (*models.ScrapedMovie, 
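All the marshal helpers share one shape: walk the heterogeneous []scraper.ScrapedContent and type-switch on both the pointer and the value form of the target type, failing loudly on anything else. Stripped to its essentials, with a hypothetical item type:

package api

import (
	"errors"
	"fmt"
)

type item struct{ Name string }

var errConversion = errors.New("conversion error")

func marshalItems(in []interface{}) ([]*item, error) {
	var ret []*item
	for _, c := range in {
		if c == nil {
			continue // the GraphQL schema requires non-nil entries
		}
		switch v := c.(type) {
		case *item:
			ret = append(ret, v)
		case item:
			// value form: v is a fresh variable per iteration, so taking
			// its address is safe, as in the originals above
			ret = append(ret, &v)
		default:
			return nil, fmt.Errorf("%w: unexpected %T", errConversion, c)
		}
	}
	return ret, nil
}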
error) { - m, err := marshalScrapedMovies([]models.ScrapedContent{content}) +func marshalScrapedMovie(content scraper.ScrapedContent) (*models.ScrapedMovie, error) { + m, err := marshalScrapedMovies([]scraper.ScrapedContent{content}) if err != nil { return nil, err } diff --git a/internal/api/server.go b/internal/api/server.go index c89e20d48..646f647d6 100644 --- a/internal/api/server.go +++ b/internal/api/server.go @@ -26,11 +26,11 @@ import ( "github.com/go-chi/httplog" "github.com/rs/cors" + "github.com/stashapp/stash/internal/api/loaders" "github.com/stashapp/stash/internal/manager" "github.com/stashapp/stash/internal/manager/config" "github.com/stashapp/stash/pkg/fsutil" "github.com/stashapp/stash/pkg/logger" - "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/ui" ) @@ -74,14 +74,29 @@ func Start() error { return errors.New(message) } - txnManager := manager.GetInstance().TxnManager - pluginCache := manager.GetInstance().PluginCache - resolver := &Resolver{ - txnManager: txnManager, - hookExecutor: pluginCache, + txnManager := manager.GetInstance().Repository + + dataloaders := loaders.Middleware{ + DatabaseProvider: txnManager, + Repository: txnManager, } - gqlSrv := gqlHandler.New(models.NewExecutableSchema(models.Config{Resolvers: resolver})) + r.Use(dataloaders.Middleware) + + pluginCache := manager.GetInstance().PluginCache + sceneService := manager.GetInstance().SceneService + imageService := manager.GetInstance().ImageService + galleryService := manager.GetInstance().GalleryService + resolver := &Resolver{ + txnManager: txnManager, + repository: txnManager, + sceneService: sceneService, + imageService: imageService, + galleryService: galleryService, + hookExecutor: pluginCache, + } + + gqlSrv := gqlHandler.New(NewExecutableSchema(Config{Resolvers: resolver})) gqlSrv.SetRecoverFunc(recoverFunc) gqlSrv.AddTransport(gqlTransport.Websocket{ Upgrader: websocket.Upgrader{ @@ -119,22 +134,33 @@ func Start() error { r.Get(loginEndPoint, getLoginHandler(loginUIBox)) r.Mount("/performer", performerRoutes{ - txnManager: txnManager, + txnManager: txnManager, + performerFinder: txnManager.Performer, }.Routes()) r.Mount("/scene", sceneRoutes{ - txnManager: txnManager, + txnManager: txnManager, + sceneFinder: txnManager.Scene, + fileFinder: txnManager.File, + captionFinder: txnManager.File, + sceneMarkerFinder: txnManager.SceneMarker, + tagFinder: txnManager.Tag, }.Routes()) r.Mount("/image", imageRoutes{ - txnManager: txnManager, + txnManager: txnManager, + imageFinder: txnManager.Image, + fileFinder: txnManager.File, }.Routes()) r.Mount("/studio", studioRoutes{ - txnManager: txnManager, + txnManager: txnManager, + studioFinder: txnManager.Studio, }.Routes()) r.Mount("/movie", movieRoutes{ - txnManager: txnManager, + txnManager: txnManager, + movieFinder: txnManager.Movie, }.Routes()) r.Mount("/tag", tagRoutes{ txnManager: txnManager, + tagFinder: txnManager.Tag, }.Routes()) r.Mount("/downloads", downloadsRoutes{}.Routes()) diff --git a/internal/api/types.go b/internal/api/types.go index 9af592806..fb65420e3 100644 --- a/internal/api/types.go +++ b/internal/api/types.go @@ -1,6 +1,12 @@ package api -import "math" +import ( + "fmt" + "math" + + "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sliceutil/stringslice" +) // An enum https://golang.org/ref/spec#Iota const ( @@ -17,3 +23,41 @@ func handleFloat64(v float64) *float64 { return &v } + +func handleFloat64Value(v float64) float64 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return 0 + } + + return 
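server.go now installs request-scoped dataloaders ahead of the GraphQL handler, so resolvers can batch and cache their per-request lookups. The real loaders are generated by vektah/dataloaden; the sketch below only illustrates what such middleware does, with invented names:

package loaders

import (
	"context"
	"net/http"
)

type ctxKey int

const loadersKey ctxKey = 0

// Loaders would hold the generated per-entity dataloaders; constructing a
// fresh instance per request keeps the cache scoped to one GraphQL operation.
type Loaders struct{}

func Middleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := context.WithValue(r.Context(), loadersKey, &Loaders{})
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}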
v +} + +func translateUpdateIDs(strIDs []string, mode models.RelationshipUpdateMode) (*models.UpdateIDs, error) { + ids, err := stringslice.StringSliceToIntSlice(strIDs) + if err != nil { + return nil, fmt.Errorf("converting ids [%v]: %w", strIDs, err) + } + return &models.UpdateIDs{ + IDs: ids, + Mode: mode, + }, nil +} + +func translateSceneMovieIDs(input BulkUpdateIds) (*models.UpdateMovieIDs, error) { + ids, err := stringslice.StringSliceToIntSlice(input.Ids) + if err != nil { + return nil, fmt.Errorf("converting ids [%v]: %w", input.Ids, err) + } + + ret := &models.UpdateMovieIDs{ + Mode: input.Mode, + } + + for _, id := range ids { + ret.Movies = append(ret.Movies, models.MoviesScenes{ + MovieID: id, + }) + } + + return ret, nil +} diff --git a/internal/api/urlbuilders/image.go b/internal/api/urlbuilders/image.go index 9594a4530..139c7ad17 100644 --- a/internal/api/urlbuilders/image.go +++ b/internal/api/urlbuilders/image.go @@ -1,8 +1,9 @@ package urlbuilders import ( - "github.com/stashapp/stash/pkg/models" "strconv" + + "github.com/stashapp/stash/pkg/models" ) type ImageURLBuilder struct { @@ -15,7 +16,7 @@ func NewImageURLBuilder(baseURL string, image *models.Image) ImageURLBuilder { return ImageURLBuilder{ BaseURL: baseURL, ImageID: strconv.Itoa(image.ID), - UpdatedAt: strconv.FormatInt(image.UpdatedAt.Timestamp.Unix(), 10), + UpdatedAt: strconv.FormatInt(image.UpdatedAt.Unix(), 10), } } diff --git a/internal/autotag/gallery.go b/internal/autotag/gallery.go index 3bdfd3c15..d2a8c2c5d 100644 --- a/internal/autotag/gallery.go +++ b/internal/autotag/gallery.go @@ -1,55 +1,99 @@ package autotag import ( + "context" + "github.com/stashapp/stash/pkg/gallery" "github.com/stashapp/stash/pkg/match" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sliceutil/intslice" ) +type GalleryPerformerUpdater interface { + models.PerformerIDLoader + gallery.PartialUpdater +} + +type GalleryTagUpdater interface { + models.TagIDLoader + gallery.PartialUpdater +} + func getGalleryFileTagger(s *models.Gallery, cache *match.Cache) tagger { + var path string + if s.Path != "" { + path = s.Path + } + // only trim the extension if gallery is file-based - trimExt := s.Zip + trimExt := s.PrimaryFileID != nil return tagger{ ID: s.ID, Type: "gallery", - Name: s.GetTitle(), - Path: s.Path.String, + Name: s.DisplayName(), + Path: path, trimExt: trimExt, cache: cache, } } // GalleryPerformers tags the provided gallery with performers whose name matches the gallery's path. -func GalleryPerformers(s *models.Gallery, rw models.GalleryReaderWriter, performerReader models.PerformerReader, cache *match.Cache) error { +func GalleryPerformers(ctx context.Context, s *models.Gallery, rw GalleryPerformerUpdater, performerReader match.PerformerAutoTagQueryer, cache *match.Cache) error { t := getGalleryFileTagger(s, cache) - return t.tagPerformers(performerReader, func(subjectID, otherID int) (bool, error) { - return gallery.AddPerformer(rw, subjectID, otherID) + return t.tagPerformers(ctx, performerReader, func(subjectID, otherID int) (bool, error) { + if err := s.LoadPerformerIDs(ctx, rw); err != nil { + return false, err + } + existing := s.PerformerIDs.List() + + if intslice.IntInclude(existing, otherID) { + return false, nil + } + + if err := gallery.AddPerformer(ctx, rw, s, otherID); err != nil { + return false, err + } + + return true, nil }) } // GalleryStudios tags the provided gallery with the first studio whose name matches the gallery's path. 
// // Gallerys will not be tagged if studio is already set. -func GalleryStudios(s *models.Gallery, rw models.GalleryReaderWriter, studioReader models.StudioReader, cache *match.Cache) error { - if s.StudioID.Valid { +func GalleryStudios(ctx context.Context, s *models.Gallery, rw GalleryFinderUpdater, studioReader match.StudioAutoTagQueryer, cache *match.Cache) error { + if s.StudioID != nil { // don't modify return nil } t := getGalleryFileTagger(s, cache) - return t.tagStudios(studioReader, func(subjectID, otherID int) (bool, error) { - return addGalleryStudio(rw, subjectID, otherID) + return t.tagStudios(ctx, studioReader, func(subjectID, otherID int) (bool, error) { + return addGalleryStudio(ctx, rw, s, otherID) }) } // GalleryTags tags the provided gallery with tags whose name matches the gallery's path. -func GalleryTags(s *models.Gallery, rw models.GalleryReaderWriter, tagReader models.TagReader, cache *match.Cache) error { +func GalleryTags(ctx context.Context, s *models.Gallery, rw GalleryTagUpdater, tagReader match.TagAutoTagQueryer, cache *match.Cache) error { t := getGalleryFileTagger(s, cache) - return t.tagTags(tagReader, func(subjectID, otherID int) (bool, error) { - return gallery.AddTag(rw, subjectID, otherID) + return t.tagTags(ctx, tagReader, func(subjectID, otherID int) (bool, error) { + if err := s.LoadTagIDs(ctx, rw); err != nil { + return false, err + } + existing := s.TagIDs.List() + + if intslice.IntInclude(existing, otherID) { + return false, nil + } + + if err := gallery.AddTag(ctx, rw, s, otherID); err != nil { + return false, err + } + + return true, nil }) } diff --git a/internal/autotag/gallery_test.go b/internal/autotag/gallery_test.go index 6d744400a..ac7da4e26 100644 --- a/internal/autotag/gallery_test.go +++ b/internal/autotag/gallery_test.go @@ -1,6 +1,7 @@ package autotag import ( + "context" "testing" "github.com/stashapp/stash/pkg/models" @@ -11,6 +12,8 @@ import ( const galleryExt = "zip" +var testCtx = context.Background() + func TestGalleryPerformers(t *testing.T) { t.Parallel() @@ -37,19 +40,24 @@ func TestGalleryPerformers(t *testing.T) { mockPerformerReader := &mocks.PerformerReaderWriter{} mockGalleryReader := &mocks.GalleryReaderWriter{} - mockPerformerReader.On("Query", mock.Anything, mock.Anything).Return(nil, 0, nil) - mockPerformerReader.On("QueryForAutoTag", mock.Anything).Return([]*models.Performer{&performer, &reversedPerformer}, nil).Once() + mockPerformerReader.On("Query", testCtx, mock.Anything, mock.Anything).Return(nil, 0, nil) + mockPerformerReader.On("QueryForAutoTag", testCtx, mock.Anything).Return([]*models.Performer{&performer, &reversedPerformer}, nil).Once() if test.Matches { - mockGalleryReader.On("GetPerformerIDs", galleryID).Return(nil, nil).Once() - mockGalleryReader.On("UpdatePerformers", galleryID, []int{performerID}).Return(nil).Once() + mockGalleryReader.On("UpdatePartial", testCtx, galleryID, models.GalleryPartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: []int{performerID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }).Return(nil, nil).Once() } gallery := models.Gallery{ - ID: galleryID, - Path: models.NullString(test.Path), + ID: galleryID, + Path: test.Path, + PerformerIDs: models.NewRelatedIDs([]int{}), } - err := GalleryPerformers(&gallery, mockGalleryReader, mockPerformerReader, nil) + err := GalleryPerformers(testCtx, &gallery, mockGalleryReader, mockPerformerReader, nil) assert.Nil(err) mockPerformerReader.AssertExpectations(t) @@ -62,7 +70,7 @@ func TestGalleryStudios(t *testing.T) { const galleryID 
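GalleryPerformers and GalleryTags (and their image counterparts further down) now implement "add once" inline: load the current relationship IDs, skip when the candidate is already present, otherwise append. The callback shape factors out to something like this sketch:

// addOnce reports whether otherID was newly added; add is expected to issue
// an UpdatePartial with RelationshipUpdateModeAdd, as in the code above.
func addOnce(existing []int, otherID int, add func(id int) error) (bool, error) {
	for _, id := range existing {
		if id == otherID {
			return false, nil // already tagged; nothing to do
		}
	}
	if err := add(otherID); err != nil {
		return false, err
	}
	return true, nil
}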
= 1 const studioName = "studio name" - const studioID = 2 + var studioID = 2 studio := models.Studio{ ID: studioID, Name: models.NullString(studioName), @@ -81,19 +89,17 @@ func TestGalleryStudios(t *testing.T) { doTest := func(mockStudioReader *mocks.StudioReaderWriter, mockGalleryReader *mocks.GalleryReaderWriter, test pathTestTable) { if test.Matches { - mockGalleryReader.On("Find", galleryID).Return(&models.Gallery{}, nil).Once() - expectedStudioID := models.NullInt64(studioID) - mockGalleryReader.On("UpdatePartial", models.GalleryPartial{ - ID: galleryID, - StudioID: &expectedStudioID, + expectedStudioID := studioID + mockGalleryReader.On("UpdatePartial", testCtx, galleryID, models.GalleryPartial{ + StudioID: models.NewOptionalInt(expectedStudioID), }).Return(nil, nil).Once() } gallery := models.Gallery{ ID: galleryID, - Path: models.NullString(test.Path), + Path: test.Path, } - err := GalleryStudios(&gallery, mockGalleryReader, mockStudioReader, nil) + err := GalleryStudios(testCtx, &gallery, mockGalleryReader, mockStudioReader, nil) assert.Nil(err) mockStudioReader.AssertExpectations(t) @@ -104,9 +110,9 @@ func TestGalleryStudios(t *testing.T) { mockStudioReader := &mocks.StudioReaderWriter{} mockGalleryReader := &mocks.GalleryReaderWriter{} - mockStudioReader.On("Query", mock.Anything, mock.Anything).Return(nil, 0, nil) - mockStudioReader.On("QueryForAutoTag", mock.Anything).Return([]*models.Studio{&studio, &reversedStudio}, nil).Once() - mockStudioReader.On("GetAliases", mock.Anything).Return([]string{}, nil).Maybe() + mockStudioReader.On("Query", testCtx, mock.Anything, mock.Anything).Return(nil, 0, nil) + mockStudioReader.On("QueryForAutoTag", testCtx, mock.Anything).Return([]*models.Studio{&studio, &reversedStudio}, nil).Once() + mockStudioReader.On("GetAliases", testCtx, mock.Anything).Return([]string{}, nil).Maybe() doTest(mockStudioReader, mockGalleryReader, test) } @@ -119,12 +125,12 @@ func TestGalleryStudios(t *testing.T) { mockStudioReader := &mocks.StudioReaderWriter{} mockGalleryReader := &mocks.GalleryReaderWriter{} - mockStudioReader.On("Query", mock.Anything, mock.Anything).Return(nil, 0, nil) - mockStudioReader.On("QueryForAutoTag", mock.Anything).Return([]*models.Studio{&studio, &reversedStudio}, nil).Once() - mockStudioReader.On("GetAliases", studioID).Return([]string{ + mockStudioReader.On("Query", testCtx, mock.Anything, mock.Anything).Return(nil, 0, nil) + mockStudioReader.On("QueryForAutoTag", testCtx, mock.Anything).Return([]*models.Studio{&studio, &reversedStudio}, nil).Once() + mockStudioReader.On("GetAliases", testCtx, studioID).Return([]string{ studioName, }, nil).Once() - mockStudioReader.On("GetAliases", reversedStudioID).Return([]string{}, nil).Once() + mockStudioReader.On("GetAliases", testCtx, reversedStudioID).Return([]string{}, nil).Once() doTest(mockStudioReader, mockGalleryReader, test) } @@ -154,15 +160,20 @@ func TestGalleryTags(t *testing.T) { doTest := func(mockTagReader *mocks.TagReaderWriter, mockGalleryReader *mocks.GalleryReaderWriter, test pathTestTable) { if test.Matches { - mockGalleryReader.On("GetTagIDs", galleryID).Return(nil, nil).Once() - mockGalleryReader.On("UpdateTags", galleryID, []int{tagID}).Return(nil).Once() + mockGalleryReader.On("UpdatePartial", testCtx, galleryID, models.GalleryPartial{ + TagIDs: &models.UpdateIDs{ + IDs: []int{tagID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }).Return(nil, nil).Once() } gallery := models.Gallery{ - ID: galleryID, - Path: models.NullString(test.Path), + ID: galleryID, + Path: 
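Every mocked store method now takes the context first, and the tests pin testCtx rather than mock.Anything where propagation matters. A self-contained sketch of the expectation style, using a hypothetical fake rather than the generated mocks:

package autotag

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
)

type fakeAliasStore struct{ mock.Mock }

func (f *fakeAliasStore) GetAliases(ctx context.Context, id int) ([]string, error) {
	args := f.Called(ctx, id)
	return args.Get(0).([]string), args.Error(1)
}

func TestContextPinnedExpectation(t *testing.T) {
	ctx := context.Background()
	s := &fakeAliasStore{}
	// Pinning the exact context (instead of mock.Anything) fails the test
	// if a call site drops or replaces it.
	s.On("GetAliases", ctx, 2).Return([]string{"alias"}, nil).Once()

	if got, err := s.GetAliases(ctx, 2); err != nil || len(got) != 1 {
		t.Fatalf("unexpected result: %v, %v", got, err)
	}
	s.AssertExpectations(t)
}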
test.Path, + TagIDs: models.NewRelatedIDs([]int{}), } - err := GalleryTags(&gallery, mockGalleryReader, mockTagReader, nil) + err := GalleryTags(testCtx, &gallery, mockGalleryReader, mockTagReader, nil) assert.Nil(err) mockTagReader.AssertExpectations(t) @@ -173,9 +184,9 @@ func TestGalleryTags(t *testing.T) { mockTagReader := &mocks.TagReaderWriter{} mockGalleryReader := &mocks.GalleryReaderWriter{} - mockTagReader.On("Query", mock.Anything, mock.Anything).Return(nil, 0, nil) - mockTagReader.On("QueryForAutoTag", mock.Anything).Return([]*models.Tag{&tag, &reversedTag}, nil).Once() - mockTagReader.On("GetAliases", mock.Anything).Return([]string{}, nil).Maybe() + mockTagReader.On("Query", testCtx, mock.Anything, mock.Anything).Return(nil, 0, nil) + mockTagReader.On("QueryForAutoTag", testCtx, mock.Anything).Return([]*models.Tag{&tag, &reversedTag}, nil).Once() + mockTagReader.On("GetAliases", testCtx, mock.Anything).Return([]string{}, nil).Maybe() doTest(mockTagReader, mockGalleryReader, test) } @@ -187,12 +198,12 @@ func TestGalleryTags(t *testing.T) { mockTagReader := &mocks.TagReaderWriter{} mockGalleryReader := &mocks.GalleryReaderWriter{} - mockTagReader.On("Query", mock.Anything, mock.Anything).Return(nil, 0, nil) - mockTagReader.On("QueryForAutoTag", mock.Anything).Return([]*models.Tag{&tag, &reversedTag}, nil).Once() - mockTagReader.On("GetAliases", tagID).Return([]string{ + mockTagReader.On("Query", testCtx, mock.Anything, mock.Anything).Return(nil, 0, nil) + mockTagReader.On("QueryForAutoTag", testCtx, mock.Anything).Return([]*models.Tag{&tag, &reversedTag}, nil).Once() + mockTagReader.On("GetAliases", testCtx, tagID).Return([]string{ tagName, }, nil).Once() - mockTagReader.On("GetAliases", reversedTagID).Return([]string{}, nil).Once() + mockTagReader.On("GetAliases", testCtx, reversedTagID).Return([]string{}, nil).Once() doTest(mockTagReader, mockGalleryReader, test) } diff --git a/internal/autotag/image.go b/internal/autotag/image.go index 516f30181..404640786 100644 --- a/internal/autotag/image.go +++ b/internal/autotag/image.go @@ -1,51 +1,90 @@ package autotag import ( + "context" + "github.com/stashapp/stash/pkg/image" "github.com/stashapp/stash/pkg/match" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sliceutil/intslice" ) +type ImagePerformerUpdater interface { + models.PerformerIDLoader + image.PartialUpdater +} + +type ImageTagUpdater interface { + models.TagIDLoader + image.PartialUpdater +} + func getImageFileTagger(s *models.Image, cache *match.Cache) tagger { return tagger{ ID: s.ID, Type: "image", - Name: s.GetTitle(), + Name: s.DisplayName(), Path: s.Path, cache: cache, } } // ImagePerformers tags the provided image with performers whose name matches the image's path. 
-func ImagePerformers(s *models.Image, rw models.ImageReaderWriter, performerReader models.PerformerReader, cache *match.Cache) error { +func ImagePerformers(ctx context.Context, s *models.Image, rw ImagePerformerUpdater, performerReader match.PerformerAutoTagQueryer, cache *match.Cache) error { t := getImageFileTagger(s, cache) - return t.tagPerformers(performerReader, func(subjectID, otherID int) (bool, error) { - return image.AddPerformer(rw, subjectID, otherID) + return t.tagPerformers(ctx, performerReader, func(subjectID, otherID int) (bool, error) { + if err := s.LoadPerformerIDs(ctx, rw); err != nil { + return false, err + } + existing := s.PerformerIDs.List() + + if intslice.IntInclude(existing, otherID) { + return false, nil + } + + if err := image.AddPerformer(ctx, rw, s, otherID); err != nil { + return false, err + } + + return true, nil }) } // ImageStudios tags the provided image with the first studio whose name matches the image's path. // // Images will not be tagged if studio is already set. -func ImageStudios(s *models.Image, rw models.ImageReaderWriter, studioReader models.StudioReader, cache *match.Cache) error { - if s.StudioID.Valid { +func ImageStudios(ctx context.Context, s *models.Image, rw ImageFinderUpdater, studioReader match.StudioAutoTagQueryer, cache *match.Cache) error { + if s.StudioID != nil { // don't modify return nil } t := getImageFileTagger(s, cache) - return t.tagStudios(studioReader, func(subjectID, otherID int) (bool, error) { - return addImageStudio(rw, subjectID, otherID) + return t.tagStudios(ctx, studioReader, func(subjectID, otherID int) (bool, error) { + return addImageStudio(ctx, rw, s, otherID) }) } // ImageTags tags the provided image with tags whose name matches the image's path. -func ImageTags(s *models.Image, rw models.ImageReaderWriter, tagReader models.TagReader, cache *match.Cache) error { +func ImageTags(ctx context.Context, s *models.Image, rw ImageTagUpdater, tagReader match.TagAutoTagQueryer, cache *match.Cache) error { t := getImageFileTagger(s, cache) - return t.tagTags(tagReader, func(subjectID, otherID int) (bool, error) { - return image.AddTag(rw, subjectID, otherID) + return t.tagTags(ctx, tagReader, func(subjectID, otherID int) (bool, error) { + if err := s.LoadTagIDs(ctx, rw); err != nil { + return false, err + } + existing := s.TagIDs.List() + + if intslice.IntInclude(existing, otherID) { + return false, nil + } + + if err := image.AddTag(ctx, rw, s, otherID); err != nil { + return false, err + } + + return true, nil }) } diff --git a/internal/autotag/image_test.go b/internal/autotag/image_test.go index 130ce51af..653cb2c2d 100644 --- a/internal/autotag/image_test.go +++ b/internal/autotag/image_test.go @@ -37,19 +37,24 @@ func TestImagePerformers(t *testing.T) { mockPerformerReader := &mocks.PerformerReaderWriter{} mockImageReader := &mocks.ImageReaderWriter{} - mockPerformerReader.On("Query", mock.Anything, mock.Anything).Return(nil, 0, nil) - mockPerformerReader.On("QueryForAutoTag", mock.Anything).Return([]*models.Performer{&performer, &reversedPerformer}, nil).Once() + mockPerformerReader.On("Query", testCtx, mock.Anything, mock.Anything).Return(nil, 0, nil) + mockPerformerReader.On("QueryForAutoTag", testCtx, mock.Anything).Return([]*models.Performer{&performer, &reversedPerformer}, nil).Once() if test.Matches { - mockImageReader.On("GetPerformerIDs", imageID).Return(nil, nil).Once() - mockImageReader.On("UpdatePerformers", imageID, []int{performerID}).Return(nil).Once() + mockImageReader.On("UpdatePartial", 
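The guard in ImageStudios also shows the model migration from database/sql null wrappers to plain pointers: "already set" is now a nil check, and writes go through the Optional types. In miniature (helper name is mine):

// Before: s.StudioID was sql.NullInt64, so the guard read s.StudioID.Valid.
// After:  s.StudioID is *int; nil means unset.
func studioAlreadySet(studioID *int) bool {
	return studioID != nil
}

// Writes use the optional wrapper, as in the tests below:
//   models.ImagePartial{StudioID: models.NewOptionalInt(studioID)}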
testCtx, imageID, models.ImagePartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: []int{performerID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }).Return(nil, nil).Once() } image := models.Image{ - ID: imageID, - Path: test.Path, + ID: imageID, + Path: test.Path, + PerformerIDs: models.NewRelatedIDs([]int{}), } - err := ImagePerformers(&image, mockImageReader, mockPerformerReader, nil) + err := ImagePerformers(testCtx, &image, mockImageReader, mockPerformerReader, nil) assert.Nil(err) mockPerformerReader.AssertExpectations(t) @@ -62,7 +67,7 @@ func TestImageStudios(t *testing.T) { const imageID = 1 const studioName = "studio name" - const studioID = 2 + var studioID = 2 studio := models.Studio{ ID: studioID, Name: models.NullString(studioName), @@ -81,11 +86,9 @@ func TestImageStudios(t *testing.T) { doTest := func(mockStudioReader *mocks.StudioReaderWriter, mockImageReader *mocks.ImageReaderWriter, test pathTestTable) { if test.Matches { - mockImageReader.On("Find", imageID).Return(&models.Image{}, nil).Once() - expectedStudioID := models.NullInt64(studioID) - mockImageReader.On("Update", models.ImagePartial{ - ID: imageID, - StudioID: &expectedStudioID, + expectedStudioID := studioID + mockImageReader.On("UpdatePartial", testCtx, imageID, models.ImagePartial{ + StudioID: models.NewOptionalInt(expectedStudioID), }).Return(nil, nil).Once() } @@ -93,7 +96,7 @@ func TestImageStudios(t *testing.T) { ID: imageID, Path: test.Path, } - err := ImageStudios(&image, mockImageReader, mockStudioReader, nil) + err := ImageStudios(testCtx, &image, mockImageReader, mockStudioReader, nil) assert.Nil(err) mockStudioReader.AssertExpectations(t) @@ -104,9 +107,9 @@ func TestImageStudios(t *testing.T) { mockStudioReader := &mocks.StudioReaderWriter{} mockImageReader := &mocks.ImageReaderWriter{} - mockStudioReader.On("Query", mock.Anything, mock.Anything).Return(nil, 0, nil) - mockStudioReader.On("QueryForAutoTag", mock.Anything).Return([]*models.Studio{&studio, &reversedStudio}, nil).Once() - mockStudioReader.On("GetAliases", mock.Anything).Return([]string{}, nil).Maybe() + mockStudioReader.On("Query", testCtx, mock.Anything, mock.Anything).Return(nil, 0, nil) + mockStudioReader.On("QueryForAutoTag", testCtx, mock.Anything).Return([]*models.Studio{&studio, &reversedStudio}, nil).Once() + mockStudioReader.On("GetAliases", testCtx, mock.Anything).Return([]string{}, nil).Maybe() doTest(mockStudioReader, mockImageReader, test) } @@ -119,12 +122,12 @@ func TestImageStudios(t *testing.T) { mockStudioReader := &mocks.StudioReaderWriter{} mockImageReader := &mocks.ImageReaderWriter{} - mockStudioReader.On("Query", mock.Anything, mock.Anything).Return(nil, 0, nil) - mockStudioReader.On("QueryForAutoTag", mock.Anything).Return([]*models.Studio{&studio, &reversedStudio}, nil).Once() - mockStudioReader.On("GetAliases", studioID).Return([]string{ + mockStudioReader.On("Query", testCtx, mock.Anything, mock.Anything).Return(nil, 0, nil) + mockStudioReader.On("QueryForAutoTag", testCtx, mock.Anything).Return([]*models.Studio{&studio, &reversedStudio}, nil).Once() + mockStudioReader.On("GetAliases", testCtx, studioID).Return([]string{ studioName, }, nil).Once() - mockStudioReader.On("GetAliases", reversedStudioID).Return([]string{}, nil).Once() + mockStudioReader.On("GetAliases", testCtx, reversedStudioID).Return([]string{}, nil).Once() doTest(mockStudioReader, mockImageReader, test) } @@ -154,15 +157,20 @@ func TestImageTags(t *testing.T) { doTest := func(mockTagReader *mocks.TagReaderWriter, mockImageReader 
*mocks.ImageReaderWriter, test pathTestTable) { if test.Matches { - mockImageReader.On("GetTagIDs", imageID).Return(nil, nil).Once() - mockImageReader.On("UpdateTags", imageID, []int{tagID}).Return(nil).Once() + mockImageReader.On("UpdatePartial", testCtx, imageID, models.ImagePartial{ + TagIDs: &models.UpdateIDs{ + IDs: []int{tagID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }).Return(nil, nil).Once() } image := models.Image{ - ID: imageID, - Path: test.Path, + ID: imageID, + Path: test.Path, + TagIDs: models.NewRelatedIDs([]int{}), } - err := ImageTags(&image, mockImageReader, mockTagReader, nil) + err := ImageTags(testCtx, &image, mockImageReader, mockTagReader, nil) assert.Nil(err) mockTagReader.AssertExpectations(t) @@ -173,9 +181,9 @@ func TestImageTags(t *testing.T) { mockTagReader := &mocks.TagReaderWriter{} mockImageReader := &mocks.ImageReaderWriter{} - mockTagReader.On("Query", mock.Anything, mock.Anything).Return(nil, 0, nil) - mockTagReader.On("QueryForAutoTag", mock.Anything).Return([]*models.Tag{&tag, &reversedTag}, nil).Once() - mockTagReader.On("GetAliases", mock.Anything).Return([]string{}, nil).Maybe() + mockTagReader.On("Query", testCtx, mock.Anything, mock.Anything).Return(nil, 0, nil) + mockTagReader.On("QueryForAutoTag", testCtx, mock.Anything).Return([]*models.Tag{&tag, &reversedTag}, nil).Once() + mockTagReader.On("GetAliases", testCtx, mock.Anything).Return([]string{}, nil).Maybe() doTest(mockTagReader, mockImageReader, test) } @@ -188,12 +196,12 @@ func TestImageTags(t *testing.T) { mockTagReader := &mocks.TagReaderWriter{} mockImageReader := &mocks.ImageReaderWriter{} - mockTagReader.On("Query", mock.Anything, mock.Anything).Return(nil, 0, nil) - mockTagReader.On("QueryForAutoTag", mock.Anything).Return([]*models.Tag{&tag, &reversedTag}, nil).Once() - mockTagReader.On("GetAliases", tagID).Return([]string{ + mockTagReader.On("Query", testCtx, mock.Anything, mock.Anything).Return(nil, 0, nil) + mockTagReader.On("QueryForAutoTag", testCtx, mock.Anything).Return([]*models.Tag{&tag, &reversedTag}, nil).Once() + mockTagReader.On("GetAliases", testCtx, tagID).Return([]string{ tagName, }, nil).Once() - mockTagReader.On("GetAliases", reversedTagID).Return([]string{}, nil).Once() + mockTagReader.On("GetAliases", testCtx, reversedTagID).Return([]string{}, nil).Once() doTest(mockTagReader, mockImageReader, test) } diff --git a/internal/autotag/integration_test.go b/internal/autotag/integration_test.go index 9ca176d4b..7c5952652 100644 --- a/internal/autotag/integration_test.go +++ b/internal/autotag/integration_test.go @@ -8,28 +8,37 @@ import ( "database/sql" "fmt" "os" + "path/filepath" "testing" - "github.com/stashapp/stash/pkg/database" - "github.com/stashapp/stash/pkg/hash/md5" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/sqlite" + "github.com/stashapp/stash/pkg/txn" _ "github.com/golang-migrate/migrate/v4/database/sqlite3" _ "github.com/golang-migrate/migrate/v4/source/file" + + // necessary to register custom migrations + _ "github.com/stashapp/stash/pkg/sqlite/migrations" ) const testName = "Foo's Bar" const existingStudioName = "ExistingStudio" const existingStudioSceneName = testName + ".dontChangeStudio.mp4" -const existingStudioImageName = testName + ".dontChangeStudio.mp4" -const existingStudioGalleryName = testName + ".dontChangeStudio.mp4" +const existingStudioImageName = testName + ".dontChangeStudio.png" +const existingStudioGalleryName = testName + ".dontChangeStudio.zip" var 
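The integration tests drop the package-level database.DB in favour of an explicit sqlite.Database plus a repository handle, as set up in runTests below. A condensed sketch of that bootstrap (the helper name is mine; temp-file handling and teardown are elided):

func mustOpenTestDB(path string) (*sqlite.Database, models.Repository) {
	db := sqlite.NewDatabase()
	// Open sets up the schema; the blank import of sqlite/migrations in this
	// file registers the custom migrations it relies on.
	if err := db.Open(path); err != nil {
		panic(fmt.Sprintf("Could not initialize database: %s", err.Error()))
	}
	return db, db.TxnRepository()
}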
existingStudioID int +const expectedMatchTitle = "expected match" + +var db *sqlite.Database +var r models.Repository + func testTeardown(databaseFile string) { - err := database.DB.Close() + err := db.Close() if err != nil { panic(err) @@ -50,10 +59,13 @@ func runTests(m *testing.M) int { f.Close() databaseFile := f.Name() - if err := database.Initialize(databaseFile); err != nil { + db = sqlite.NewDatabase() + if err := db.Open(databaseFile); err != nil { panic(fmt.Sprintf("Could not initialize database: %s", err.Error())) } + r = db.TxnRepository() + // defer close and delete the database defer testTeardown(databaseFile) @@ -71,7 +83,7 @@ func TestMain(m *testing.M) { os.Exit(ret) } -func createPerformer(pqb models.PerformerWriter) error { +func createPerformer(ctx context.Context, pqb models.PerformerWriter) error { // create the performer performer := models.Performer{ Checksum: testName, @@ -79,7 +91,7 @@ func createPerformer(pqb models.PerformerWriter) error { Favorite: sql.NullBool{Valid: true, Bool: false}, } - _, err := pqb.Create(performer) + _, err := pqb.Create(ctx, performer) if err != nil { return err } @@ -87,23 +99,23 @@ func createPerformer(pqb models.PerformerWriter) error { return nil } -func createStudio(qb models.StudioWriter, name string) (*models.Studio, error) { +func createStudio(ctx context.Context, qb models.StudioWriter, name string) (*models.Studio, error) { // create the studio studio := models.Studio{ Checksum: name, Name: sql.NullString{Valid: true, String: name}, } - return qb.Create(studio) + return qb.Create(ctx, studio) } -func createTag(qb models.TagWriter) error { +func createTag(ctx context.Context, qb models.TagWriter) error { // create the studio tag := models.Tag{ Name: testName, } - _, err := qb.Create(tag) + _, err := qb.Create(ctx, tag) if err != nil { return err } @@ -111,233 +123,399 @@ func createTag(qb models.TagWriter) error { return nil } -func createScenes(sqb models.SceneReaderWriter) error { +func createScenes(ctx context.Context, sqb models.SceneReaderWriter, folderStore file.FolderStore, fileStore file.Store) error { // create the scenes scenePatterns, falseScenePatterns := generateTestPaths(testName, sceneExt) for _, fn := range scenePatterns { - err := createScene(sqb, makeScene(fn, true)) + f, err := createSceneFile(ctx, fn, folderStore, fileStore) if err != nil { return err } + + const expectedResult = true + if err := createScene(ctx, sqb, makeScene(expectedResult), f); err != nil { + return err + } } + for _, fn := range falseScenePatterns { - err := createScene(sqb, makeScene(fn, false)) + f, err := createSceneFile(ctx, fn, folderStore, fileStore) if err != nil { return err } + + const expectedResult = false + if err := createScene(ctx, sqb, makeScene(expectedResult), f); err != nil { + return err + } } // add organized scenes for _, fn := range scenePatterns { - s := makeScene("organized"+fn, false) - s.Organized = true - err := createScene(sqb, s) + f, err := createSceneFile(ctx, "organized"+fn, folderStore, fileStore) if err != nil { return err } + + const expectedResult = false + s := makeScene(expectedResult) + s.Organized = true + if err := createScene(ctx, sqb, s, f); err != nil { + return err + } } // create scene with existing studio io - studioScene := makeScene(existingStudioSceneName, true) - studioScene.StudioID = sql.NullInt64{Valid: true, Int64: int64(existingStudioID)} - err := createScene(sqb, studioScene) + f, err := createSceneFile(ctx, existingStudioSceneName, folderStore, fileStore) if err != nil { return 
err } + s := &models.Scene{ + Title: expectedMatchTitle, + URL: existingStudioSceneName, + StudioID: &existingStudioID, + } + if err := createScene(ctx, sqb, s, f); err != nil { + return err + } + return nil } -func makeScene(name string, expectedResult bool) *models.Scene { - scene := &models.Scene{ - Checksum: sql.NullString{String: md5.FromString(name), Valid: true}, - Path: name, - } +func makeScene(expectedResult bool) *models.Scene { + s := &models.Scene{} // if expectedResult is true then we expect it to match, set the title accordingly if expectedResult { - scene.Title = sql.NullString{Valid: true, String: name} + s.Title = expectedMatchTitle } - return scene + return s } -func createScene(sqb models.SceneWriter, scene *models.Scene) error { - _, err := sqb.Create(*scene) +func createSceneFile(ctx context.Context, name string, folderStore file.FolderStore, fileStore file.Store) (*file.VideoFile, error) { + folderPath := filepath.Dir(name) + basename := filepath.Base(name) + + folder, err := getOrCreateFolder(ctx, folderStore, folderPath) + if err != nil { + return nil, err + } + + folderID := folder.ID + + f := &file.VideoFile{ + BaseFile: &file.BaseFile{ + Basename: basename, + ParentFolderID: folderID, + }, + } + + if err := fileStore.Create(ctx, f); err != nil { + return nil, fmt.Errorf("creating scene file %q: %w", name, err) + } + + return f, nil +} + +func getOrCreateFolder(ctx context.Context, folderStore file.FolderStore, folderPath string) (*file.Folder, error) { + f, err := folderStore.FindByPath(ctx, folderPath) + if err != nil { + return nil, fmt.Errorf("getting folder by path: %w", err) + } + + if f != nil { + return f, nil + } + + var parentID file.FolderID + dir := filepath.Dir(folderPath) + if dir != "." { + parent, err := getOrCreateFolder(ctx, folderStore, dir) + if err != nil { + return nil, err + } + + parentID = parent.ID + } + + f = &file.Folder{ + Path: folderPath, + } + + if parentID != 0 { + f.ParentFolderID = &parentID + } + + if err := folderStore.Create(ctx, f); err != nil { + return nil, fmt.Errorf("creating folder: %w", err) + } + + return f, nil +} + +func createScene(ctx context.Context, sqb models.SceneWriter, s *models.Scene, f *file.VideoFile) error { + err := sqb.Create(ctx, s, []file.ID{f.ID}) if err != nil { - return fmt.Errorf("Failed to create scene with name '%s': %s", scene.Path, err.Error()) + return fmt.Errorf("Failed to create scene with path '%s': %s", f.Path, err.Error()) } return nil } -func createImages(sqb models.ImageReaderWriter) error { +func createImages(ctx context.Context, w models.ImageReaderWriter, folderStore file.FolderStore, fileStore file.Store) error { // create the images imagePatterns, falseImagePatterns := generateTestPaths(testName, imageExt) for _, fn := range imagePatterns { - err := createImage(sqb, makeImage(fn, true)) + f, err := createImageFile(ctx, fn, folderStore, fileStore) if err != nil { return err } + + const expectedResult = true + if err := createImage(ctx, w, makeImage(expectedResult), f); err != nil { + return err + } } for _, fn := range falseImagePatterns { - err := createImage(sqb, makeImage(fn, false)) + f, err := createImageFile(ctx, fn, folderStore, fileStore) if err != nil { return err } + + const expectedResult = false + if err := createImage(ctx, w, makeImage(expectedResult), f); err != nil { + return err + } } // add organized images for _, fn := range imagePatterns { - s := makeImage("organized"+fn, false) - s.Organized = true - err := createImage(sqb, s) + f, err := createImageFile(ctx, 
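getOrCreateFolder above recurses up the directory chain, creating missing ancestors before the leaf, so a fixture path like a/b/clip.mp4 materialises folder rows for a and a/b before the file row is inserted. A usage sketch inside a transaction (withTxn and r are the helpers from this file; the wrapper function is mine):

func ensureFixtureFolder() error {
	return withTxn(func(ctx context.Context) error {
		folder, err := getOrCreateFolder(ctx, r.Folder, "a/b")
		if err != nil {
			return err
		}
		_ = folder // "a" is created first, then "a/b" with its ParentFolderID set
		return nil
	})
}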
"organized"+fn, folderStore, fileStore) if err != nil { return err } + + const expectedResult = false + s := makeImage(expectedResult) + s.Organized = true + if err := createImage(ctx, w, s, f); err != nil { + return err + } } // create image with existing studio io - studioImage := makeImage(existingStudioImageName, true) - studioImage.StudioID = sql.NullInt64{Valid: true, Int64: int64(existingStudioID)} - err := createImage(sqb, studioImage) + f, err := createImageFile(ctx, existingStudioImageName, folderStore, fileStore) if err != nil { return err } + s := &models.Image{ + Title: existingStudioImageName, + StudioID: &existingStudioID, + } + if err := createImage(ctx, w, s, f); err != nil { + return err + } + return nil } -func makeImage(name string, expectedResult bool) *models.Image { - image := &models.Image{ - Checksum: md5.FromString(name), - Path: name, +func createImageFile(ctx context.Context, name string, folderStore file.FolderStore, fileStore file.Store) (*file.ImageFile, error) { + folderPath := filepath.Dir(name) + basename := filepath.Base(name) + + folder, err := getOrCreateFolder(ctx, folderStore, folderPath) + if err != nil { + return nil, err } + folderID := folder.ID + + f := &file.ImageFile{ + BaseFile: &file.BaseFile{ + Basename: basename, + ParentFolderID: folderID, + }, + } + + if err := fileStore.Create(ctx, f); err != nil { + return nil, err + } + + return f, nil +} + +func makeImage(expectedResult bool) *models.Image { + o := &models.Image{} + // if expectedResult is true then we expect it to match, set the title accordingly if expectedResult { - image.Title = sql.NullString{Valid: true, String: name} + o.Title = expectedMatchTitle } - return image + return o } -func createImage(sqb models.ImageWriter, image *models.Image) error { - _, err := sqb.Create(*image) +func createImage(ctx context.Context, w models.ImageWriter, o *models.Image, f *file.ImageFile) error { + err := w.Create(ctx, &models.ImageCreateInput{ + Image: o, + FileIDs: []file.ID{f.ID}, + }) if err != nil { - return fmt.Errorf("Failed to create image with name '%s': %s", image.Path, err.Error()) + return fmt.Errorf("Failed to create image with path '%s': %s", f.Path, err.Error()) } return nil } -func createGalleries(sqb models.GalleryReaderWriter) error { +func createGalleries(ctx context.Context, w models.GalleryReaderWriter, folderStore file.FolderStore, fileStore file.Store) error { // create the galleries galleryPatterns, falseGalleryPatterns := generateTestPaths(testName, galleryExt) for _, fn := range galleryPatterns { - err := createGallery(sqb, makeGallery(fn, true)) + f, err := createGalleryFile(ctx, fn, folderStore, fileStore) if err != nil { return err } + + const expectedResult = true + if err := createGallery(ctx, w, makeGallery(expectedResult), f); err != nil { + return err + } } for _, fn := range falseGalleryPatterns { - err := createGallery(sqb, makeGallery(fn, false)) + f, err := createGalleryFile(ctx, fn, folderStore, fileStore) if err != nil { return err } + + const expectedResult = false + if err := createGallery(ctx, w, makeGallery(expectedResult), f); err != nil { + return err + } } // add organized galleries for _, fn := range galleryPatterns { - s := makeGallery("organized"+fn, false) - s.Organized = true - err := createGallery(sqb, s) + f, err := createGalleryFile(ctx, "organized"+fn, folderStore, fileStore) if err != nil { return err } + + const expectedResult = false + s := makeGallery(expectedResult) + s.Organized = true + if err := createGallery(ctx, w, s, f); err != 
nil { + return err + } } // create gallery with existing studio io - studioGallery := makeGallery(existingStudioGalleryName, true) - studioGallery.StudioID = sql.NullInt64{Valid: true, Int64: int64(existingStudioID)} - err := createGallery(sqb, studioGallery) + f, err := createGalleryFile(ctx, existingStudioGalleryName, folderStore, fileStore) if err != nil { return err } + s := &models.Gallery{ + Title: existingStudioGalleryName, + StudioID: &existingStudioID, + } + if err := createGallery(ctx, w, s, f); err != nil { + return err + } + return nil } -func makeGallery(name string, expectedResult bool) *models.Gallery { - gallery := &models.Gallery{ - Checksum: md5.FromString(name), - Path: models.NullString(name), +func createGalleryFile(ctx context.Context, name string, folderStore file.FolderStore, fileStore file.Store) (*file.BaseFile, error) { + folderPath := filepath.Dir(name) + basename := filepath.Base(name) + + folder, err := getOrCreateFolder(ctx, folderStore, folderPath) + if err != nil { + return nil, err } + folderID := folder.ID + + f := &file.BaseFile{ + Basename: basename, + ParentFolderID: folderID, + } + + if err := fileStore.Create(ctx, f); err != nil { + return nil, err + } + + return f, nil +} + +func makeGallery(expectedResult bool) *models.Gallery { + o := &models.Gallery{} + // if expectedResult is true then we expect it to match, set the title accordingly if expectedResult { - gallery.Title = sql.NullString{Valid: true, String: name} + o.Title = expectedMatchTitle } - return gallery + return o } -func createGallery(sqb models.GalleryWriter, gallery *models.Gallery) error { - _, err := sqb.Create(*gallery) - +func createGallery(ctx context.Context, w models.GalleryWriter, o *models.Gallery, f *file.BaseFile) error { + err := w.Create(ctx, o, []file.ID{f.ID}) if err != nil { - return fmt.Errorf("Failed to create gallery with name '%s': %s", gallery.Path.String, err.Error()) + return fmt.Errorf("Failed to create gallery with path '%s': %s", f.Path, err.Error()) } return nil } -func withTxn(f func(r models.Repository) error) error { - t := sqlite.NewTransactionManager() - return t.WithTxn(context.TODO(), f) +func withTxn(f func(ctx context.Context) error) error { + return txn.WithTxn(context.TODO(), db, f) } func populateDB() error { - if err := withTxn(func(r models.Repository) error { - err := createPerformer(r.Performer()) + if err := withTxn(func(ctx context.Context) error { + err := createPerformer(ctx, r.Performer) if err != nil { return err } - _, err = createStudio(r.Studio(), testName) + _, err = createStudio(ctx, r.Studio, testName) if err != nil { return err } // create existing studio - existingStudio, err := createStudio(r.Studio(), existingStudioName) + existingStudio, err := createStudio(ctx, r.Studio, existingStudioName) if err != nil { return err } existingStudioID = existingStudio.ID - err = createTag(r.Tag()) + err = createTag(ctx, r.Tag) if err != nil { return err } - err = createScenes(r.Scene()) + err = createScenes(ctx, r.Scene, r.Folder, r.File) if err != nil { return err } - err = createImages(r.Image()) + err = createImages(ctx, r.Image, r.Folder, r.File) if err != nil { return err } - err = createGalleries(r.Gallery()) + err = createGalleries(ctx, r.Gallery, r.Folder, r.File) if err != nil { return err } @@ -352,9 +530,9 @@ func populateDB() error { func TestParsePerformerScenes(t *testing.T) { var performers []*models.Performer - if err := withTxn(func(r models.Repository) error { + if err := withTxn(func(ctx context.Context) error { var err 
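Fixture creation is now two-phase throughout: first the file row (via its folder), then the logical entity referencing the file ID. The per-type helpers above all reduce to this flow (wrapper function name is mine):

func createFixtureScene() error {
	return withTxn(func(ctx context.Context) error {
		// Phase 1: folder chain plus file row.
		f, err := createSceneFile(ctx, "dir/clip.mp4", r.Folder, r.File)
		if err != nil {
			return err
		}
		// Phase 2: the scene references the file by ID; the scene row itself
		// no longer carries a path or checksum.
		return createScene(ctx, r.Scene, &models.Scene{Title: expectedMatchTitle}, f)
	})
}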
error - performers, err = r.Performer().All() + performers, err = r.Performer.All(ctx) return err }); err != nil { t.Errorf("Error getting performer: %s", err) @@ -362,33 +540,33 @@ func TestParsePerformerScenes(t *testing.T) { } for _, p := range performers { - if err := withTxn(func(r models.Repository) error { - return PerformerScenes(p, nil, r.Scene(), nil) + if err := withTxn(func(ctx context.Context) error { + return PerformerScenes(ctx, p, nil, r.Scene, nil) }); err != nil { t.Errorf("Error auto-tagging performers: %s", err) } } // verify that scenes were tagged correctly - withTxn(func(r models.Repository) error { - pqb := r.Performer() + withTxn(func(ctx context.Context) error { + pqb := r.Performer - scenes, err := r.Scene().All() + scenes, err := r.Scene.All(ctx) if err != nil { t.Error(err.Error()) } for _, scene := range scenes { - performers, err := pqb.FindBySceneID(scene.ID) + performers, err := pqb.FindBySceneID(ctx, scene.ID) if err != nil { t.Errorf("Error getting scene performers: %s", err.Error()) } // title is only set on scenes where we expect performer to be set - if scene.Title.String == scene.Path && len(performers) == 0 { + if scene.Title == expectedMatchTitle && len(performers) == 0 { t.Errorf("Did not set performer '%s' for path '%s'", testName, scene.Path) - } else if scene.Title.String != scene.Path && len(performers) > 0 { + } else if scene.Title != expectedMatchTitle && len(performers) > 0 { t.Errorf("Incorrectly set performer '%s' for path '%s'", testName, scene.Path) } } @@ -399,9 +577,9 @@ func TestParsePerformerScenes(t *testing.T) { func TestParseStudioScenes(t *testing.T) { var studios []*models.Studio - if err := withTxn(func(r models.Repository) error { + if err := withTxn(func(ctx context.Context) error { var err error - studios, err = r.Studio().All() + studios, err = r.Studio.All(ctx) return err }); err != nil { t.Errorf("Error getting studio: %s", err) @@ -409,41 +587,41 @@ func TestParseStudioScenes(t *testing.T) { } for _, s := range studios { - if err := withTxn(func(r models.Repository) error { - aliases, err := r.Studio().GetAliases(s.ID) + if err := withTxn(func(ctx context.Context) error { + aliases, err := r.Studio.GetAliases(ctx, s.ID) if err != nil { return err } - return StudioScenes(s, nil, aliases, r.Scene(), nil) + return StudioScenes(ctx, s, nil, aliases, r.Scene, nil) }); err != nil { t.Errorf("Error auto-tagging performers: %s", err) } } // verify that scenes were tagged correctly - withTxn(func(r models.Repository) error { - scenes, err := r.Scene().All() + withTxn(func(ctx context.Context) error { + scenes, err := r.Scene.All(ctx) if err != nil { t.Error(err.Error()) } for _, scene := range scenes { // check for existing studio id scene first - if scene.Path == existingStudioSceneName { - if scene.StudioID.Int64 != int64(existingStudioID) { + if scene.URL == existingStudioSceneName { + if scene.StudioID == nil || *scene.StudioID != existingStudioID { t.Error("Incorrectly overwrote studio ID for scene with existing studio ID") } } else { // title is only set on scenes where we expect studio to be set - if scene.Title.String == scene.Path { - if !scene.StudioID.Valid { + if scene.Title == expectedMatchTitle { + if scene.StudioID == nil { t.Errorf("Did not set studio '%s' for path '%s'", testName, scene.Path) - } else if scene.StudioID.Int64 != int64(studios[1].ID) { - t.Errorf("Incorrect studio id %d set for path '%s'", scene.StudioID.Int64, scene.Path) + } else if scene.StudioID != nil && *scene.StudioID != studios[1].ID { + 
t.Errorf("Incorrect studio id %d set for path '%s'", scene.StudioID, scene.Path) } - } else if scene.Title.String != scene.Path && scene.StudioID.Int64 == int64(studios[1].ID) { + } else if scene.Title != expectedMatchTitle && scene.StudioID != nil && *scene.StudioID == studios[1].ID { t.Errorf("Incorrectly set studio '%s' for path '%s'", testName, scene.Path) } } @@ -455,9 +633,9 @@ func TestParseStudioScenes(t *testing.T) { func TestParseTagScenes(t *testing.T) { var tags []*models.Tag - if err := withTxn(func(r models.Repository) error { + if err := withTxn(func(ctx context.Context) error { var err error - tags, err = r.Tag().All() + tags, err = r.Tag.All(ctx) return err }); err != nil { t.Errorf("Error getting performer: %s", err) @@ -465,38 +643,38 @@ func TestParseTagScenes(t *testing.T) { } for _, s := range tags { - if err := withTxn(func(r models.Repository) error { - aliases, err := r.Tag().GetAliases(s.ID) + if err := withTxn(func(ctx context.Context) error { + aliases, err := r.Tag.GetAliases(ctx, s.ID) if err != nil { return err } - return TagScenes(s, nil, aliases, r.Scene(), nil) + return TagScenes(ctx, s, nil, aliases, r.Scene, nil) }); err != nil { t.Errorf("Error auto-tagging performers: %s", err) } } // verify that scenes were tagged correctly - withTxn(func(r models.Repository) error { - scenes, err := r.Scene().All() + withTxn(func(ctx context.Context) error { + scenes, err := r.Scene.All(ctx) if err != nil { t.Error(err.Error()) } - tqb := r.Tag() + tqb := r.Tag for _, scene := range scenes { - tags, err := tqb.FindBySceneID(scene.ID) + tags, err := tqb.FindBySceneID(ctx, scene.ID) if err != nil { t.Errorf("Error getting scene tags: %s", err.Error()) } // title is only set on scenes where we expect tag to be set - if scene.Title.String == scene.Path && len(tags) == 0 { + if scene.Title == expectedMatchTitle && len(tags) == 0 { t.Errorf("Did not set tag '%s' for path '%s'", testName, scene.Path) - } else if scene.Title.String != scene.Path && len(tags) > 0 { + } else if (scene.Title != expectedMatchTitle) && len(tags) > 0 { t.Errorf("Incorrectly set tag '%s' for path '%s'", testName, scene.Path) } } @@ -507,9 +685,9 @@ func TestParseTagScenes(t *testing.T) { func TestParsePerformerImages(t *testing.T) { var performers []*models.Performer - if err := withTxn(func(r models.Repository) error { + if err := withTxn(func(ctx context.Context) error { var err error - performers, err = r.Performer().All() + performers, err = r.Performer.All(ctx) return err }); err != nil { t.Errorf("Error getting performer: %s", err) @@ -517,33 +695,34 @@ func TestParsePerformerImages(t *testing.T) { } for _, p := range performers { - if err := withTxn(func(r models.Repository) error { - return PerformerImages(p, nil, r.Image(), nil) + if err := withTxn(func(ctx context.Context) error { + return PerformerImages(ctx, p, nil, r.Image, nil) }); err != nil { t.Errorf("Error auto-tagging performers: %s", err) } } // verify that images were tagged correctly - withTxn(func(r models.Repository) error { - pqb := r.Performer() + withTxn(func(ctx context.Context) error { + pqb := r.Performer - images, err := r.Image().All() + images, err := r.Image.All(ctx) if err != nil { t.Error(err.Error()) } for _, image := range images { - performers, err := pqb.FindByImageID(image.ID) + performers, err := pqb.FindByImageID(ctx, image.ID) if err != nil { t.Errorf("Error getting image performers: %s", err.Error()) } // title is only set on images where we expect performer to be set - if image.Title.String == 
image.Path && len(performers) == 0 { + expectedMatch := image.Title == expectedMatchTitle || image.Title == existingStudioImageName + if expectedMatch && len(performers) == 0 { t.Errorf("Did not set performer '%s' for path '%s'", testName, image.Path) - } else if image.Title.String != image.Path && len(performers) > 0 { + } else if !expectedMatch && len(performers) > 0 { t.Errorf("Incorrectly set performer '%s' for path '%s'", testName, image.Path) } } @@ -554,9 +733,9 @@ func TestParsePerformerImages(t *testing.T) { func TestParseStudioImages(t *testing.T) { var studios []*models.Studio - if err := withTxn(func(r models.Repository) error { + if err := withTxn(func(ctx context.Context) error { var err error - studios, err = r.Studio().All() + studios, err = r.Studio.All(ctx) return err }); err != nil { t.Errorf("Error getting studio: %s", err) @@ -564,41 +743,41 @@ func TestParseStudioImages(t *testing.T) { } for _, s := range studios { - if err := withTxn(func(r models.Repository) error { - aliases, err := r.Studio().GetAliases(s.ID) + if err := withTxn(func(ctx context.Context) error { + aliases, err := r.Studio.GetAliases(ctx, s.ID) if err != nil { return err } - return StudioImages(s, nil, aliases, r.Image(), nil) + return StudioImages(ctx, s, nil, aliases, r.Image, nil) }); err != nil { t.Errorf("Error auto-tagging performers: %s", err) } } // verify that images were tagged correctly - withTxn(func(r models.Repository) error { - images, err := r.Image().All() + withTxn(func(ctx context.Context) error { + images, err := r.Image.All(ctx) if err != nil { t.Error(err.Error()) } for _, image := range images { // check for existing studio id image first - if image.Path == existingStudioImageName { - if image.StudioID.Int64 != int64(existingStudioID) { + if image.Title == existingStudioImageName { + if *image.StudioID != existingStudioID { t.Error("Incorrectly overwrote studio ID for image with existing studio ID") } } else { // title is only set on images where we expect studio to be set - if image.Title.String == image.Path { - if !image.StudioID.Valid { + if image.Title == expectedMatchTitle { + if image.StudioID == nil { t.Errorf("Did not set studio '%s' for path '%s'", testName, image.Path) - } else if image.StudioID.Int64 != int64(studios[1].ID) { - t.Errorf("Incorrect studio id %d set for path '%s'", image.StudioID.Int64, image.Path) + } else if *image.StudioID != studios[1].ID { + t.Errorf("Incorrect studio id %d set for path '%s'", *image.StudioID, image.Path) } - } else if image.Title.String != image.Path && image.StudioID.Int64 == int64(studios[1].ID) { + } else if image.Title != expectedMatchTitle && image.StudioID != nil && *image.StudioID == studios[1].ID { t.Errorf("Incorrectly set studio '%s' for path '%s'", testName, image.Path) } } @@ -610,9 +789,9 @@ func TestParseStudioImages(t *testing.T) { func TestParseTagImages(t *testing.T) { var tags []*models.Tag - if err := withTxn(func(r models.Repository) error { + if err := withTxn(func(ctx context.Context) error { var err error - tags, err = r.Tag().All() + tags, err = r.Tag.All(ctx) return err }); err != nil { t.Errorf("Error getting performer: %s", err) @@ -620,38 +799,39 @@ func TestParseTagImages(t *testing.T) { } for _, s := range tags { - if err := withTxn(func(r models.Repository) error { - aliases, err := r.Tag().GetAliases(s.ID) + if err := withTxn(func(ctx context.Context) error { + aliases, err := r.Tag.GetAliases(ctx, s.ID) if err != nil { return err } - return TagImages(s, nil, aliases, r.Image(), nil) + return 
TagImages(ctx, s, nil, aliases, r.Image, nil) }); err != nil { t.Errorf("Error auto-tagging performers: %s", err) } } // verify that images were tagged correctly - withTxn(func(r models.Repository) error { - images, err := r.Image().All() + withTxn(func(ctx context.Context) error { + images, err := r.Image.All(ctx) if err != nil { t.Error(err.Error()) } - tqb := r.Tag() + tqb := r.Tag for _, image := range images { - tags, err := tqb.FindByImageID(image.ID) + tags, err := tqb.FindByImageID(ctx, image.ID) if err != nil { t.Errorf("Error getting image tags: %s", err.Error()) } // title is only set on images where we expect performer to be set - if image.Title.String == image.Path && len(tags) == 0 { + expectedMatch := image.Title == expectedMatchTitle || image.Title == existingStudioImageName + if expectedMatch && len(tags) == 0 { t.Errorf("Did not set tag '%s' for path '%s'", testName, image.Path) - } else if image.Title.String != image.Path && len(tags) > 0 { + } else if !expectedMatch && len(tags) > 0 { t.Errorf("Incorrectly set tag '%s' for path '%s'", testName, image.Path) } } @@ -662,9 +842,9 @@ func TestParseTagImages(t *testing.T) { func TestParsePerformerGalleries(t *testing.T) { var performers []*models.Performer - if err := withTxn(func(r models.Repository) error { + if err := withTxn(func(ctx context.Context) error { var err error - performers, err = r.Performer().All() + performers, err = r.Performer.All(ctx) return err }); err != nil { t.Errorf("Error getting performer: %s", err) @@ -672,34 +852,35 @@ func TestParsePerformerGalleries(t *testing.T) { } for _, p := range performers { - if err := withTxn(func(r models.Repository) error { - return PerformerGalleries(p, nil, r.Gallery(), nil) + if err := withTxn(func(ctx context.Context) error { + return PerformerGalleries(ctx, p, nil, r.Gallery, nil) }); err != nil { t.Errorf("Error auto-tagging performers: %s", err) } } // verify that galleries were tagged correctly - withTxn(func(r models.Repository) error { - pqb := r.Performer() + withTxn(func(ctx context.Context) error { + pqb := r.Performer - galleries, err := r.Gallery().All() + galleries, err := r.Gallery.All(ctx) if err != nil { t.Error(err.Error()) } for _, gallery := range galleries { - performers, err := pqb.FindByGalleryID(gallery.ID) + performers, err := pqb.FindByGalleryID(ctx, gallery.ID) if err != nil { t.Errorf("Error getting gallery performers: %s", err.Error()) } // title is only set on galleries where we expect performer to be set - if gallery.Title.String == gallery.Path.String && len(performers) == 0 { - t.Errorf("Did not set performer '%s' for path '%s'", testName, gallery.Path.String) - } else if gallery.Title.String != gallery.Path.String && len(performers) > 0 { - t.Errorf("Incorrectly set performer '%s' for path '%s'", testName, gallery.Path.String) + expectedMatch := gallery.Title == expectedMatchTitle || gallery.Title == existingStudioGalleryName + if expectedMatch && len(performers) == 0 { + t.Errorf("Did not set performer '%s' for path '%s'", testName, gallery.Path) + } else if !expectedMatch && len(performers) > 0 { + t.Errorf("Incorrectly set performer '%s' for path '%s'", testName, gallery.Path) } } @@ -709,9 +890,9 @@ func TestParsePerformerGalleries(t *testing.T) { func TestParseStudioGalleries(t *testing.T) { var studios []*models.Studio - if err := withTxn(func(r models.Repository) error { + if err := withTxn(func(ctx context.Context) error { var err error - studios, err = r.Studio().All() + studios, err = r.Studio.All(ctx) return err }); err 
!= nil { t.Errorf("Error getting studio: %s", err) @@ -719,42 +900,42 @@ func TestParseStudioGalleries(t *testing.T) { } for _, s := range studios { - if err := withTxn(func(r models.Repository) error { - aliases, err := r.Studio().GetAliases(s.ID) + if err := withTxn(func(ctx context.Context) error { + aliases, err := r.Studio.GetAliases(ctx, s.ID) if err != nil { return err } - return StudioGalleries(s, nil, aliases, r.Gallery(), nil) + return StudioGalleries(ctx, s, nil, aliases, r.Gallery, nil) }); err != nil { t.Errorf("Error auto-tagging performers: %s", err) } } // verify that galleries were tagged correctly - withTxn(func(r models.Repository) error { - galleries, err := r.Gallery().All() + withTxn(func(ctx context.Context) error { + galleries, err := r.Gallery.All(ctx) if err != nil { t.Error(err.Error()) } for _, gallery := range galleries { // check for existing studio id gallery first - if gallery.Path.String == existingStudioGalleryName { - if gallery.StudioID.Int64 != int64(existingStudioID) { + if gallery.Title == existingStudioGalleryName { + if *gallery.StudioID != existingStudioID { t.Error("Incorrectly overwrote studio ID for gallery with existing studio ID") } } else { // title is only set on galleries where we expect studio to be set - if gallery.Title.String == gallery.Path.String { - if !gallery.StudioID.Valid { - t.Errorf("Did not set studio '%s' for path '%s'", testName, gallery.Path.String) - } else if gallery.StudioID.Int64 != int64(studios[1].ID) { - t.Errorf("Incorrect studio id %d set for path '%s'", gallery.StudioID.Int64, gallery.Path.String) + if gallery.Title == expectedMatchTitle { + if gallery.StudioID == nil { + t.Errorf("Did not set studio '%s' for path '%s'", testName, gallery.Path) + } else if *gallery.StudioID != studios[1].ID { + t.Errorf("Incorrect studio id %d set for path '%s'", *gallery.StudioID, gallery.Path) } - } else if gallery.Title.String != gallery.Path.String && gallery.StudioID.Int64 == int64(studios[1].ID) { - t.Errorf("Incorrectly set studio '%s' for path '%s'", testName, gallery.Path.String) + } else if gallery.Title != expectedMatchTitle && (gallery.StudioID != nil && *gallery.StudioID == studios[1].ID) { + t.Errorf("Incorrectly set studio '%s' for path '%s'", testName, gallery.Path) } } } @@ -765,9 +946,9 @@ func TestParseStudioGalleries(t *testing.T) { func TestParseTagGalleries(t *testing.T) { var tags []*models.Tag - if err := withTxn(func(r models.Repository) error { + if err := withTxn(func(ctx context.Context) error { var err error - tags, err = r.Tag().All() + tags, err = r.Tag.All(ctx) return err }); err != nil { t.Errorf("Error getting performer: %s", err) @@ -775,39 +956,40 @@ func TestParseTagGalleries(t *testing.T) { } for _, s := range tags { - if err := withTxn(func(r models.Repository) error { - aliases, err := r.Tag().GetAliases(s.ID) + if err := withTxn(func(ctx context.Context) error { + aliases, err := r.Tag.GetAliases(ctx, s.ID) if err != nil { return err } - return TagGalleries(s, nil, aliases, r.Gallery(), nil) + return TagGalleries(ctx, s, nil, aliases, r.Gallery, nil) }); err != nil { t.Errorf("Error auto-tagging performers: %s", err) } } // verify that galleries were tagged correctly - withTxn(func(r models.Repository) error { - galleries, err := r.Gallery().All() + withTxn(func(ctx context.Context) error { + galleries, err := r.Gallery.All(ctx) if err != nil { t.Error(err.Error()) } - tqb := r.Tag() + tqb := r.Tag for _, gallery := range galleries { - tags, err := tqb.FindByGalleryID(gallery.ID) + tags, 
err := tqb.FindByGalleryID(ctx, gallery.ID) if err != nil { t.Errorf("Error getting gallery tags: %s", err.Error()) } // title is only set on galleries where we expect performer to be set - if gallery.Title.String == gallery.Path.String && len(tags) == 0 { - t.Errorf("Did not set tag '%s' for path '%s'", testName, gallery.Path.String) - } else if gallery.Title.String != gallery.Path.String && len(tags) > 0 { - t.Errorf("Incorrectly set tag '%s' for path '%s'", testName, gallery.Path.String) + expectedMatch := gallery.Title == expectedMatchTitle || gallery.Title == existingStudioGalleryName + if expectedMatch && len(tags) == 0 { + t.Errorf("Did not set tag '%s' for path '%s'", testName, gallery.Path) + } else if !expectedMatch && len(tags) > 0 { + t.Errorf("Incorrectly set tag '%s' for path '%s'", testName, gallery.Path) } } diff --git a/internal/autotag/performer.go b/internal/autotag/performer.go index a6c89466a..f240dc0c5 100644 --- a/internal/autotag/performer.go +++ b/internal/autotag/performer.go @@ -1,13 +1,34 @@ package autotag import ( + "context" + "github.com/stashapp/stash/pkg/gallery" "github.com/stashapp/stash/pkg/image" "github.com/stashapp/stash/pkg/match" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/scene" + "github.com/stashapp/stash/pkg/sliceutil/intslice" ) +type SceneQueryPerformerUpdater interface { + scene.Queryer + models.PerformerIDLoader + scene.PartialUpdater +} + +type ImageQueryPerformerUpdater interface { + image.Queryer + models.PerformerIDLoader + image.PartialUpdater +} + +type GalleryQueryPerformerUpdater interface { + gallery.Queryer + models.PerformerIDLoader + gallery.PartialUpdater +} + func getPerformerTagger(p *models.Performer, cache *match.Cache) tagger { return tagger{ ID: p.ID, @@ -18,28 +39,67 @@ func getPerformerTagger(p *models.Performer, cache *match.Cache) tagger { } // PerformerScenes searches for scenes whose path matches the provided performer name and tags the scene with the performer. -func PerformerScenes(p *models.Performer, paths []string, rw models.SceneReaderWriter, cache *match.Cache) error { +func PerformerScenes(ctx context.Context, p *models.Performer, paths []string, rw SceneQueryPerformerUpdater, cache *match.Cache) error { t := getPerformerTagger(p, cache) - return t.tagScenes(paths, rw, func(subjectID, otherID int) (bool, error) { - return scene.AddPerformer(rw, otherID, subjectID) + return t.tagScenes(ctx, paths, rw, func(o *models.Scene) (bool, error) { + if err := o.LoadPerformerIDs(ctx, rw); err != nil { + return false, err + } + existing := o.PerformerIDs.List() + + if intslice.IntInclude(existing, p.ID) { + return false, nil + } + + if err := scene.AddPerformer(ctx, rw, o, p.ID); err != nil { + return false, err + } + + return true, nil }) } // PerformerImages searches for images whose path matches the provided performer name and tags the image with the performer. 
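// NOTE: the refactored scene, image and gallery callbacks in this file all
// share one idempotent shape: load the related IDs, report "no change" when
// the performer is already linked, and only then add the link through the
// partial-update path. A minimal sketch of that shape, assuming the intslice
// package imported in this diff (addLink itself is a hypothetical
// generalisation, not part of the change):
func addLink(existing []int, id int, add func() error) (bool, error) {
	if intslice.IntInclude(existing, id) {
		return false, nil // already linked; nothing to do
	}
	if err := add(); err != nil {
		return false, err
	}
	return true, nil // link added; the tagger logs it
}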
-func PerformerImages(p *models.Performer, paths []string, rw models.ImageReaderWriter, cache *match.Cache) error { +func PerformerImages(ctx context.Context, p *models.Performer, paths []string, rw ImageQueryPerformerUpdater, cache *match.Cache) error { t := getPerformerTagger(p, cache) - return t.tagImages(paths, rw, func(subjectID, otherID int) (bool, error) { - return image.AddPerformer(rw, otherID, subjectID) + return t.tagImages(ctx, paths, rw, func(o *models.Image) (bool, error) { + if err := o.LoadPerformerIDs(ctx, rw); err != nil { + return false, err + } + existing := o.PerformerIDs.List() + + if intslice.IntInclude(existing, p.ID) { + return false, nil + } + + if err := image.AddPerformer(ctx, rw, o, p.ID); err != nil { + return false, err + } + + return true, nil }) } // PerformerGalleries searches for galleries whose path matches the provided performer name and tags the gallery with the performer. -func PerformerGalleries(p *models.Performer, paths []string, rw models.GalleryReaderWriter, cache *match.Cache) error { +func PerformerGalleries(ctx context.Context, p *models.Performer, paths []string, rw GalleryQueryPerformerUpdater, cache *match.Cache) error { t := getPerformerTagger(p, cache) - return t.tagGalleries(paths, rw, func(subjectID, otherID int) (bool, error) { - return gallery.AddPerformer(rw, otherID, subjectID) + return t.tagGalleries(ctx, paths, rw, func(o *models.Gallery) (bool, error) { + if err := o.LoadPerformerIDs(ctx, rw); err != nil { + return false, err + } + existing := o.PerformerIDs.List() + + if intslice.IntInclude(existing, p.ID) { + return false, nil + } + + if err := gallery.AddPerformer(ctx, rw, o, p.ID); err != nil { + return false, err + } + + return true, nil }) } diff --git a/internal/autotag/performer_test.go b/internal/autotag/performer_test.go index 31befd76a..71161cbfe 100644 --- a/internal/autotag/performer_test.go +++ b/internal/autotag/performer_test.go @@ -1,6 +1,7 @@ package autotag import ( + "path/filepath" "testing" "github.com/stashapp/stash/pkg/image" @@ -27,10 +28,14 @@ func TestPerformerScenes(t *testing.T) { "performer + name", `(?i)(?:^|_|[^\p{L}\d])performer[.\-_ ]*\+[.\-_ ]*name(?:$|_|[^\p{L}\d])`, }, - { + } + + // trailing backslash tests only work where filepath separator is not backslash + if filepath.Separator != '\\' { + performerNames = append(performerNames, test{ `performer + name\`, `(?i)(?:^|_|[^\p{L}\d])performer[.\-_ ]*\+[.\-_ ]*name\\(?:$|_|[^\p{L}\d])`, - }, + }) } for _, p := range performerNames { @@ -47,8 +52,9 @@ func testPerformerScenes(t *testing.T, performerName, expectedRegex string) { matchingPaths, falsePaths := generateTestPaths(performerName, "mp4") for i, p := range append(matchingPaths, falsePaths...) { scenes = append(scenes, &models.Scene{ - ID: i + 1, - Path: p, + ID: i + 1, + Path: p, + PerformerIDs: models.NewRelatedIDs([]int{}), }) } @@ -72,16 +78,20 @@ func testPerformerScenes(t *testing.T, performerName, expectedRegex string) { PerPage: &perPage, } - mockSceneReader.On("Query", scene.QueryOptions(expectedSceneFilter, expectedFindFilter, false)). + mockSceneReader.On("Query", testCtx, scene.QueryOptions(expectedSceneFilter, expectedFindFilter, false)). 
Return(mocks.SceneQueryResult(scenes, len(scenes)), nil).Once() for i := range matchingPaths { sceneID := i + 1 - mockSceneReader.On("GetPerformerIDs", sceneID).Return(nil, nil).Once() - mockSceneReader.On("UpdatePerformers", sceneID, []int{performerID}).Return(nil).Once() + mockSceneReader.On("UpdatePartial", testCtx, sceneID, models.ScenePartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: []int{performerID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }).Return(nil, nil).Once() } - err := PerformerScenes(&performer, nil, mockSceneReader, nil) + err := PerformerScenes(testCtx, &performer, nil, mockSceneReader, nil) assert := assert.New(t) @@ -122,8 +132,9 @@ func testPerformerImages(t *testing.T, performerName, expectedRegex string) { matchingPaths, falsePaths := generateTestPaths(performerName, imageExt) for i, p := range append(matchingPaths, falsePaths...) { images = append(images, &models.Image{ - ID: i + 1, - Path: p, + ID: i + 1, + Path: p, + PerformerIDs: models.NewRelatedIDs([]int{}), }) } @@ -147,16 +158,20 @@ func testPerformerImages(t *testing.T, performerName, expectedRegex string) { PerPage: &perPage, } - mockImageReader.On("Query", image.QueryOptions(expectedImageFilter, expectedFindFilter, false)). + mockImageReader.On("Query", testCtx, image.QueryOptions(expectedImageFilter, expectedFindFilter, false)). Return(mocks.ImageQueryResult(images, len(images)), nil).Once() for i := range matchingPaths { imageID := i + 1 - mockImageReader.On("GetPerformerIDs", imageID).Return(nil, nil).Once() - mockImageReader.On("UpdatePerformers", imageID, []int{performerID}).Return(nil).Once() + mockImageReader.On("UpdatePartial", testCtx, imageID, models.ImagePartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: []int{performerID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }).Return(nil, nil).Once() } - err := PerformerImages(&performer, nil, mockImageReader, nil) + err := PerformerImages(testCtx, &performer, nil, mockImageReader, nil) assert := assert.New(t) @@ -196,9 +211,11 @@ func testPerformerGalleries(t *testing.T, performerName, expectedRegex string) { var galleries []*models.Gallery matchingPaths, falsePaths := generateTestPaths(performerName, galleryExt) for i, p := range append(matchingPaths, falsePaths...) 
{ + v := p galleries = append(galleries, &models.Gallery{ - ID: i + 1, - Path: models.NullString(p), + ID: i + 1, + Path: v, + PerformerIDs: models.NewRelatedIDs([]int{}), }) } @@ -222,15 +239,19 @@ func testPerformerGalleries(t *testing.T, performerName, expectedRegex string) { PerPage: &perPage, } - mockGalleryReader.On("Query", expectedGalleryFilter, expectedFindFilter).Return(galleries, len(galleries), nil).Once() + mockGalleryReader.On("Query", testCtx, expectedGalleryFilter, expectedFindFilter).Return(galleries, len(galleries), nil).Once() for i := range matchingPaths { galleryID := i + 1 - mockGalleryReader.On("GetPerformerIDs", galleryID).Return(nil, nil).Once() - mockGalleryReader.On("UpdatePerformers", galleryID, []int{performerID}).Return(nil).Once() + mockGalleryReader.On("UpdatePartial", testCtx, galleryID, models.GalleryPartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: []int{performerID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }).Return(nil, nil).Once() } - err := PerformerGalleries(&performer, nil, mockGalleryReader, nil) + err := PerformerGalleries(testCtx, &performer, nil, mockGalleryReader, nil) assert := assert.New(t) diff --git a/internal/autotag/scene.go b/internal/autotag/scene.go index cfdcaf393..285ff7d7d 100644 --- a/internal/autotag/scene.go +++ b/internal/autotag/scene.go @@ -1,51 +1,90 @@ package autotag import ( + "context" + "github.com/stashapp/stash/pkg/match" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/scene" + "github.com/stashapp/stash/pkg/sliceutil/intslice" ) +type ScenePerformerUpdater interface { + models.PerformerIDLoader + scene.PartialUpdater +} + +type SceneTagUpdater interface { + models.TagIDLoader + scene.PartialUpdater +} + func getSceneFileTagger(s *models.Scene, cache *match.Cache) tagger { return tagger{ ID: s.ID, Type: "scene", - Name: s.GetTitle(), + Name: s.DisplayName(), Path: s.Path, cache: cache, } } // ScenePerformers tags the provided scene with performers whose name matches the scene's path. -func ScenePerformers(s *models.Scene, rw models.SceneReaderWriter, performerReader models.PerformerReader, cache *match.Cache) error { +func ScenePerformers(ctx context.Context, s *models.Scene, rw ScenePerformerUpdater, performerReader match.PerformerAutoTagQueryer, cache *match.Cache) error { t := getSceneFileTagger(s, cache) - return t.tagPerformers(performerReader, func(subjectID, otherID int) (bool, error) { - return scene.AddPerformer(rw, subjectID, otherID) + return t.tagPerformers(ctx, performerReader, func(subjectID, otherID int) (bool, error) { + if err := s.LoadPerformerIDs(ctx, rw); err != nil { + return false, err + } + existing := s.PerformerIDs.List() + + if intslice.IntInclude(existing, otherID) { + return false, nil + } + + if err := scene.AddPerformer(ctx, rw, s, otherID); err != nil { + return false, err + } + + return true, nil }) } // SceneStudios tags the provided scene with the first studio whose name matches the scene's path. // // Scenes will not be tagged if studio is already set. 
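// NOTE: with StudioID now a *int rather than sql.NullInt64, "already set"
// becomes a plain nil check and the write goes through UpdatePartial with an
// OptionalInt. A condensed sketch of the guard-then-update shape used by
// addSceneStudio later in this diff (setStudio is a hypothetical name):
func setStudio(ctx context.Context, rw scene.PartialUpdater, o *models.Scene, studioID int) (bool, error) {
	if o.StudioID != nil {
		return false, nil // never overwrite an existing studio
	}
	_, err := rw.UpdatePartial(ctx, o.ID, models.ScenePartial{
		StudioID: models.NewOptionalInt(studioID),
	})
	return err == nil, err
}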
-func SceneStudios(s *models.Scene, rw models.SceneReaderWriter, studioReader models.StudioReader, cache *match.Cache) error { - if s.StudioID.Valid { +func SceneStudios(ctx context.Context, s *models.Scene, rw SceneFinderUpdater, studioReader match.StudioAutoTagQueryer, cache *match.Cache) error { + if s.StudioID != nil { // don't modify return nil } t := getSceneFileTagger(s, cache) - return t.tagStudios(studioReader, func(subjectID, otherID int) (bool, error) { - return addSceneStudio(rw, subjectID, otherID) + return t.tagStudios(ctx, studioReader, func(subjectID, otherID int) (bool, error) { + return addSceneStudio(ctx, rw, s, otherID) }) } // SceneTags tags the provided scene with tags whose name matches the scene's path. -func SceneTags(s *models.Scene, rw models.SceneReaderWriter, tagReader models.TagReader, cache *match.Cache) error { +func SceneTags(ctx context.Context, s *models.Scene, rw SceneTagUpdater, tagReader match.TagAutoTagQueryer, cache *match.Cache) error { t := getSceneFileTagger(s, cache) - return t.tagTags(tagReader, func(subjectID, otherID int) (bool, error) { - return scene.AddTag(rw, subjectID, otherID) + return t.tagTags(ctx, tagReader, func(subjectID, otherID int) (bool, error) { + if err := s.LoadTagIDs(ctx, rw); err != nil { + return false, err + } + existing := s.TagIDs.List() + + if intslice.IntInclude(existing, otherID) { + return false, nil + } + + if err := scene.AddTag(ctx, rw, s, otherID); err != nil { + return false, err + } + + return true, nil }) } diff --git a/internal/autotag/scene_test.go b/internal/autotag/scene_test.go index 578b9e7f6..1e9766836 100644 --- a/internal/autotag/scene_test.go +++ b/internal/autotag/scene_test.go @@ -2,6 +2,7 @@ package autotag import ( "fmt" + "path/filepath" "strings" "testing" @@ -33,13 +34,10 @@ func generateNamePatterns(name, separator, ext string) []string { ret = append(ret, fmt.Sprintf("%s%saaa.%s", name, separator, ext)) ret = append(ret, fmt.Sprintf("aaa%s%s.%s", separator, name, ext)) ret = append(ret, fmt.Sprintf("aaa%s%s%sbbb.%s", separator, name, separator, ext)) - ret = append(ret, fmt.Sprintf("dir/%s%saaa.%s", name, separator, ext)) - ret = append(ret, fmt.Sprintf("dir%sdir/%s%saaa.%s", separator, name, separator, ext)) - ret = append(ret, fmt.Sprintf("dir\\%s%saaa.%s", name, separator, ext)) - ret = append(ret, fmt.Sprintf("%s%saaa/dir/bbb.%s", name, separator, ext)) - ret = append(ret, fmt.Sprintf("%s%saaa\\dir\\bbb.%s", name, separator, ext)) - ret = append(ret, fmt.Sprintf("dir/%s%s/aaa.%s", name, separator, ext)) - ret = append(ret, fmt.Sprintf("dir\\%s%s\\aaa.%s", name, separator, ext)) + ret = append(ret, filepath.Join("dir", fmt.Sprintf("%s%saaa.%s", name, separator, ext))) + ret = append(ret, filepath.Join(fmt.Sprintf("dir%sdir", separator), fmt.Sprintf("%s%saaa.%s", name, separator, ext))) + ret = append(ret, filepath.Join(fmt.Sprintf("%s%saaa", name, separator), "dir", fmt.Sprintf("bbb.%s", ext))) + ret = append(ret, filepath.Join("dir", fmt.Sprintf("%s%s", name, separator), fmt.Sprintf("aaa.%s", ext))) return ret } @@ -90,8 +88,7 @@ func generateTestPaths(testName, ext string) (scenePatterns []string, falseScene falseScenePatterns = append(falseScenePatterns, fmt.Sprintf("%saaa.%s", testName, ext)) // add path separator false scenarios - falseScenePatterns = append(falseScenePatterns, generateFalseNamePatterns(testName, "/", ext)...) - falseScenePatterns = append(falseScenePatterns, generateFalseNamePatterns(testName, "\\", ext)...) 
+ falseScenePatterns = append(falseScenePatterns, generateFalseNamePatterns(testName, string(filepath.Separator), ext)...) // split patterns only valid for ._- and whitespace for _, separator := range testSeparators { @@ -173,19 +170,25 @@ func TestScenePerformers(t *testing.T) { mockPerformerReader := &mocks.PerformerReaderWriter{} mockSceneReader := &mocks.SceneReaderWriter{} - mockPerformerReader.On("Query", mock.Anything, mock.Anything).Return(nil, 0, nil) - mockPerformerReader.On("QueryForAutoTag", mock.Anything).Return([]*models.Performer{&performer, &reversedPerformer}, nil).Once() - - if test.Matches { - mockSceneReader.On("GetPerformerIDs", sceneID).Return(nil, nil).Once() - mockSceneReader.On("UpdatePerformers", sceneID, []int{performerID}).Return(nil).Once() - } + mockPerformerReader.On("Query", testCtx, mock.Anything, mock.Anything).Return(nil, 0, nil) + mockPerformerReader.On("QueryForAutoTag", testCtx, mock.Anything).Return([]*models.Performer{&performer, &reversedPerformer}, nil).Once() scene := models.Scene{ - ID: sceneID, - Path: test.Path, + ID: sceneID, + Path: test.Path, + PerformerIDs: models.NewRelatedIDs([]int{}), } - err := ScenePerformers(&scene, mockSceneReader, mockPerformerReader, nil) + + if test.Matches { + mockSceneReader.On("UpdatePartial", testCtx, sceneID, models.ScenePartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: []int{performerID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }).Return(nil, nil).Once() + } + + err := ScenePerformers(testCtx, &scene, mockSceneReader, mockPerformerReader, nil) assert.Nil(err) mockPerformerReader.AssertExpectations(t) @@ -196,9 +199,11 @@ func TestScenePerformers(t *testing.T) { func TestSceneStudios(t *testing.T) { t.Parallel() - const sceneID = 1 - const studioName = "studio name" - const studioID = 2 + var ( + sceneID = 1 + studioName = "studio name" + studioID = 2 + ) studio := models.Studio{ ID: studioID, Name: models.NullString(studioName), @@ -217,11 +222,9 @@ func TestSceneStudios(t *testing.T) { doTest := func(mockStudioReader *mocks.StudioReaderWriter, mockSceneReader *mocks.SceneReaderWriter, test pathTestTable) { if test.Matches { - mockSceneReader.On("Find", sceneID).Return(&models.Scene{}, nil).Once() - expectedStudioID := models.NullInt64(studioID) - mockSceneReader.On("Update", models.ScenePartial{ - ID: sceneID, - StudioID: &expectedStudioID, + expectedStudioID := studioID + mockSceneReader.On("UpdatePartial", testCtx, sceneID, models.ScenePartial{ + StudioID: models.NewOptionalInt(expectedStudioID), }).Return(nil, nil).Once() } @@ -229,7 +232,7 @@ func TestSceneStudios(t *testing.T) { ID: sceneID, Path: test.Path, } - err := SceneStudios(&scene, mockSceneReader, mockStudioReader, nil) + err := SceneStudios(testCtx, &scene, mockSceneReader, mockStudioReader, nil) assert.Nil(err) mockStudioReader.AssertExpectations(t) @@ -240,9 +243,9 @@ func TestSceneStudios(t *testing.T) { mockStudioReader := &mocks.StudioReaderWriter{} mockSceneReader := &mocks.SceneReaderWriter{} - mockStudioReader.On("Query", mock.Anything, mock.Anything).Return(nil, 0, nil) - mockStudioReader.On("QueryForAutoTag", mock.Anything).Return([]*models.Studio{&studio, &reversedStudio}, nil).Once() - mockStudioReader.On("GetAliases", mock.Anything).Return([]string{}, nil).Maybe() + mockStudioReader.On("Query", testCtx, mock.Anything, mock.Anything).Return(nil, 0, nil) + mockStudioReader.On("QueryForAutoTag", testCtx, mock.Anything).Return([]*models.Studio{&studio, &reversedStudio}, nil).Once() + mockStudioReader.On("GetAliases", 
testCtx, mock.Anything).Return([]string{}, nil).Maybe() doTest(mockStudioReader, mockSceneReader, test) } @@ -255,12 +258,12 @@ func TestSceneStudios(t *testing.T) { mockStudioReader := &mocks.StudioReaderWriter{} mockSceneReader := &mocks.SceneReaderWriter{} - mockStudioReader.On("Query", mock.Anything, mock.Anything).Return(nil, 0, nil) - mockStudioReader.On("QueryForAutoTag", mock.Anything).Return([]*models.Studio{&studio, &reversedStudio}, nil).Once() - mockStudioReader.On("GetAliases", studioID).Return([]string{ + mockStudioReader.On("Query", testCtx, mock.Anything, mock.Anything).Return(nil, 0, nil) + mockStudioReader.On("QueryForAutoTag", testCtx, mock.Anything).Return([]*models.Studio{&studio, &reversedStudio}, nil).Once() + mockStudioReader.On("GetAliases", testCtx, studioID).Return([]string{ studioName, }, nil).Once() - mockStudioReader.On("GetAliases", reversedStudioID).Return([]string{}, nil).Once() + mockStudioReader.On("GetAliases", testCtx, reversedStudioID).Return([]string{}, nil).Once() doTest(mockStudioReader, mockSceneReader, test) } @@ -290,15 +293,20 @@ func TestSceneTags(t *testing.T) { doTest := func(mockTagReader *mocks.TagReaderWriter, mockSceneReader *mocks.SceneReaderWriter, test pathTestTable) { if test.Matches { - mockSceneReader.On("GetTagIDs", sceneID).Return(nil, nil).Once() - mockSceneReader.On("UpdateTags", sceneID, []int{tagID}).Return(nil).Once() + mockSceneReader.On("UpdatePartial", testCtx, sceneID, models.ScenePartial{ + TagIDs: &models.UpdateIDs{ + IDs: []int{tagID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }).Return(nil, nil).Once() } scene := models.Scene{ - ID: sceneID, - Path: test.Path, + ID: sceneID, + Path: test.Path, + TagIDs: models.NewRelatedIDs([]int{}), } - err := SceneTags(&scene, mockSceneReader, mockTagReader, nil) + err := SceneTags(testCtx, &scene, mockSceneReader, mockTagReader, nil) assert.Nil(err) mockTagReader.AssertExpectations(t) @@ -309,9 +317,9 @@ func TestSceneTags(t *testing.T) { mockTagReader := &mocks.TagReaderWriter{} mockSceneReader := &mocks.SceneReaderWriter{} - mockTagReader.On("Query", mock.Anything, mock.Anything).Return(nil, 0, nil) - mockTagReader.On("QueryForAutoTag", mock.Anything).Return([]*models.Tag{&tag, &reversedTag}, nil).Once() - mockTagReader.On("GetAliases", mock.Anything).Return([]string{}, nil).Maybe() + mockTagReader.On("Query", testCtx, mock.Anything, mock.Anything).Return(nil, 0, nil) + mockTagReader.On("QueryForAutoTag", testCtx, mock.Anything).Return([]*models.Tag{&tag, &reversedTag}, nil).Once() + mockTagReader.On("GetAliases", testCtx, mock.Anything).Return([]string{}, nil).Maybe() doTest(mockTagReader, mockSceneReader, test) } @@ -324,12 +332,12 @@ func TestSceneTags(t *testing.T) { mockTagReader := &mocks.TagReaderWriter{} mockSceneReader := &mocks.SceneReaderWriter{} - mockTagReader.On("Query", mock.Anything, mock.Anything).Return(nil, 0, nil) - mockTagReader.On("QueryForAutoTag", mock.Anything).Return([]*models.Tag{&tag, &reversedTag}, nil).Once() - mockTagReader.On("GetAliases", tagID).Return([]string{ + mockTagReader.On("Query", testCtx, mock.Anything, mock.Anything).Return(nil, 0, nil) + mockTagReader.On("QueryForAutoTag", testCtx, mock.Anything).Return([]*models.Tag{&tag, &reversedTag}, nil).Once() + mockTagReader.On("GetAliases", testCtx, tagID).Return([]string{ tagName, }, nil).Once() - mockTagReader.On("GetAliases", reversedTagID).Return([]string{}, nil).Once() + mockTagReader.On("GetAliases", testCtx, reversedTagID).Return([]string{}, nil).Once() doTest(mockTagReader, 
mockSceneReader, test) } diff --git a/internal/autotag/studio.go b/internal/autotag/studio.go index 4a02e7305..4a7099dc1 100644 --- a/internal/autotag/studio.go +++ b/internal/autotag/studio.go @@ -1,79 +1,61 @@ package autotag import ( - "database/sql" + "context" + "github.com/stashapp/stash/pkg/gallery" + "github.com/stashapp/stash/pkg/image" "github.com/stashapp/stash/pkg/match" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/scene" ) -func addSceneStudio(sceneWriter models.SceneReaderWriter, sceneID, studioID int) (bool, error) { +func addSceneStudio(ctx context.Context, sceneWriter scene.PartialUpdater, o *models.Scene, studioID int) (bool, error) { // don't set if already set - scene, err := sceneWriter.Find(sceneID) - if err != nil { - return false, err - } - - if scene.StudioID.Valid { + if o.StudioID != nil { return false, nil } // set the studio id - s := sql.NullInt64{Int64: int64(studioID), Valid: true} scenePartial := models.ScenePartial{ - ID: sceneID, - StudioID: &s, + StudioID: models.NewOptionalInt(studioID), } - if _, err := sceneWriter.Update(scenePartial); err != nil { + if _, err := sceneWriter.UpdatePartial(ctx, o.ID, scenePartial); err != nil { return false, err } return true, nil } -func addImageStudio(imageWriter models.ImageReaderWriter, imageID, studioID int) (bool, error) { +func addImageStudio(ctx context.Context, imageWriter image.PartialUpdater, i *models.Image, studioID int) (bool, error) { // don't set if already set - image, err := imageWriter.Find(imageID) - if err != nil { - return false, err - } - - if image.StudioID.Valid { + if i.StudioID != nil { return false, nil } // set the studio id - s := sql.NullInt64{Int64: int64(studioID), Valid: true} imagePartial := models.ImagePartial{ - ID: imageID, - StudioID: &s, + StudioID: models.NewOptionalInt(studioID), } - if _, err := imageWriter.Update(imagePartial); err != nil { + if _, err := imageWriter.UpdatePartial(ctx, i.ID, imagePartial); err != nil { return false, err } return true, nil } -func addGalleryStudio(galleryWriter models.GalleryReaderWriter, galleryID, studioID int) (bool, error) { +func addGalleryStudio(ctx context.Context, galleryWriter GalleryFinderUpdater, o *models.Gallery, studioID int) (bool, error) { // don't set if already set - gallery, err := galleryWriter.Find(galleryID) - if err != nil { - return false, err - } - - if gallery.StudioID.Valid { + if o.StudioID != nil { return false, nil } // set the studio id - s := sql.NullInt64{Int64: int64(studioID), Valid: true} galleryPartial := models.GalleryPartial{ - ID: galleryID, - StudioID: &s, + StudioID: models.NewOptionalInt(studioID), } - if _, err := galleryWriter.UpdatePartial(galleryPartial); err != nil { + if _, err := galleryWriter.UpdatePartial(ctx, o.ID, galleryPartial); err != nil { return false, err } return true, nil @@ -98,13 +80,18 @@ func getStudioTagger(p *models.Studio, aliases []string, cache *match.Cache) []t return ret } +type SceneFinderUpdater interface { + scene.Queryer + scene.PartialUpdater +} + // StudioScenes searches for scenes whose path matches the provided studio name and tags the scene with the studio, if studio is not already set on the scene. 
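// NOTE: SceneFinderUpdater is deliberately narrow — just scene.Queryer plus
// scene.PartialUpdater — so both the real repository and the mockery test
// doubles satisfy it implicitly. A compile-time assertion is one way to
// document that; a sketch, assuming the mocks package the tests in this
// diff already use:
var _ SceneFinderUpdater = (*mocks.SceneReaderWriter)(nil)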
-func StudioScenes(p *models.Studio, paths []string, aliases []string, rw models.SceneReaderWriter, cache *match.Cache) error { +func StudioScenes(ctx context.Context, p *models.Studio, paths []string, aliases []string, rw SceneFinderUpdater, cache *match.Cache) error { t := getStudioTagger(p, aliases, cache) for _, tt := range t { - if err := tt.tagScenes(paths, rw, func(subjectID, otherID int) (bool, error) { - return addSceneStudio(rw, otherID, subjectID) + if err := tt.tagScenes(ctx, paths, rw, func(o *models.Scene) (bool, error) { + return addSceneStudio(ctx, rw, o, p.ID) }); err != nil { return err } @@ -113,13 +100,19 @@ func StudioScenes(p *models.Studio, paths []string, aliases []string, rw models. return nil } +type ImageFinderUpdater interface { + image.Queryer + Find(ctx context.Context, id int) (*models.Image, error) + UpdatePartial(ctx context.Context, id int, partial models.ImagePartial) (*models.Image, error) +} + // StudioImages searches for images whose path matches the provided studio name and tags the image with the studio, if studio is not already set on the image. -func StudioImages(p *models.Studio, paths []string, aliases []string, rw models.ImageReaderWriter, cache *match.Cache) error { +func StudioImages(ctx context.Context, p *models.Studio, paths []string, aliases []string, rw ImageFinderUpdater, cache *match.Cache) error { t := getStudioTagger(p, aliases, cache) for _, tt := range t { - if err := tt.tagImages(paths, rw, func(subjectID, otherID int) (bool, error) { - return addImageStudio(rw, otherID, subjectID) + if err := tt.tagImages(ctx, paths, rw, func(i *models.Image) (bool, error) { + return addImageStudio(ctx, rw, i, p.ID) }); err != nil { return err } @@ -128,13 +121,19 @@ func StudioImages(p *models.Studio, paths []string, aliases []string, rw models. return nil } +type GalleryFinderUpdater interface { + gallery.Queryer + gallery.PartialUpdater + Find(ctx context.Context, id int) (*models.Gallery, error) +} + // StudioGalleries searches for galleries whose path matches the provided studio name and tags the gallery with the studio, if studio is not already set on the gallery. 
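// NOTE: a full studio auto-tag pass over every object type now threads the
// same context through all three helpers. Roughly (a sketch; the function
// and parameter names are illustrative, not part of this change):
func tagStudioEverywhere(ctx context.Context, s *models.Studio, aliases []string,
	scenes SceneFinderUpdater, images ImageFinderUpdater, galleries GalleryFinderUpdater,
	cache *match.Cache) error {
	if err := StudioScenes(ctx, s, nil, aliases, scenes, cache); err != nil {
		return err
	}
	if err := StudioImages(ctx, s, nil, aliases, images, cache); err != nil {
		return err
	}
	return StudioGalleries(ctx, s, nil, aliases, galleries, cache)
}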
-func StudioGalleries(p *models.Studio, paths []string, aliases []string, rw models.GalleryReaderWriter, cache *match.Cache) error { +func StudioGalleries(ctx context.Context, p *models.Studio, paths []string, aliases []string, rw GalleryFinderUpdater, cache *match.Cache) error { t := getStudioTagger(p, aliases, cache) for _, tt := range t { - if err := tt.tagGalleries(paths, rw, func(subjectID, otherID int) (bool, error) { - return addGalleryStudio(rw, otherID, subjectID) + if err := tt.tagGalleries(ctx, paths, rw, func(o *models.Gallery) (bool, error) { + return addGalleryStudio(ctx, rw, o, p.ID) }); err != nil { return err } diff --git a/internal/autotag/studio_test.go b/internal/autotag/studio_test.go index 76d7e7db5..f7513ad03 100644 --- a/internal/autotag/studio_test.go +++ b/internal/autotag/studio_test.go @@ -1,6 +1,7 @@ package autotag import ( + "path/filepath" "testing" "github.com/stashapp/stash/pkg/image" @@ -17,49 +18,60 @@ type testStudioCase struct { aliasRegex string } -var testStudioCases = []testStudioCase{ - { - "studio name", - `(?i)(?:^|_|[^\p{L}\d])studio[.\-_ ]*name(?:$|_|[^\p{L}\d])`, - "", - "", - }, - { - "studio + name", - `(?i)(?:^|_|[^\p{L}\d])studio[.\-_ ]*\+[.\-_ ]*name(?:$|_|[^\p{L}\d])`, - "", - "", - }, - { - `studio + name\`, - `(?i)(?:^|_|[^\p{L}\d])studio[.\-_ ]*\+[.\-_ ]*name\\(?:$|_|[^\p{L}\d])`, - "", - "", - }, - { - "studio name", - `(?i)(?:^|_|[^\p{L}\d])studio[.\-_ ]*name(?:$|_|[^\p{L}\d])`, - "alias name", - `(?i)(?:^|_|[^\p{L}\d])alias[.\-_ ]*name(?:$|_|[^\p{L}\d])`, - }, - { - "studio + name", - `(?i)(?:^|_|[^\p{L}\d])studio[.\-_ ]*\+[.\-_ ]*name(?:$|_|[^\p{L}\d])`, - "alias + name", - `(?i)(?:^|_|[^\p{L}\d])alias[.\-_ ]*\+[.\-_ ]*name(?:$|_|[^\p{L}\d])`, - }, - { - `studio + name\`, - `(?i)(?:^|_|[^\p{L}\d])studio[.\-_ ]*\+[.\-_ ]*name\\(?:$|_|[^\p{L}\d])`, - `alias + name\`, - `(?i)(?:^|_|[^\p{L}\d])alias[.\-_ ]*\+[.\-_ ]*name\\(?:$|_|[^\p{L}\d])`, - }, -} +var ( + testStudioCases = []testStudioCase{ + { + "studio name", + `(?i)(?:^|_|[^\p{L}\d])studio[.\-_ ]*name(?:$|_|[^\p{L}\d])`, + "", + "", + }, + { + "studio + name", + `(?i)(?:^|_|[^\p{L}\d])studio[.\-_ ]*\+[.\-_ ]*name(?:$|_|[^\p{L}\d])`, + "", + "", + }, + { + "studio name", + `(?i)(?:^|_|[^\p{L}\d])studio[.\-_ ]*name(?:$|_|[^\p{L}\d])`, + "alias name", + `(?i)(?:^|_|[^\p{L}\d])alias[.\-_ ]*name(?:$|_|[^\p{L}\d])`, + }, + { + "studio + name", + `(?i)(?:^|_|[^\p{L}\d])studio[.\-_ ]*\+[.\-_ ]*name(?:$|_|[^\p{L}\d])`, + "alias + name", + `(?i)(?:^|_|[^\p{L}\d])alias[.\-_ ]*\+[.\-_ ]*name(?:$|_|[^\p{L}\d])`, + }, + } + + trailingBackslashStudioCases = []testStudioCase{ + { + `studio + name\`, + `(?i)(?:^|_|[^\p{L}\d])studio[.\-_ ]*\+[.\-_ ]*name\\(?:$|_|[^\p{L}\d])`, + "", + "", + }, + { + `studio + name\`, + `(?i)(?:^|_|[^\p{L}\d])studio[.\-_ ]*\+[.\-_ ]*name\\(?:$|_|[^\p{L}\d])`, + `alias + name\`, + `(?i)(?:^|_|[^\p{L}\d])alias[.\-_ ]*\+[.\-_ ]*name\\(?:$|_|[^\p{L}\d])`, + }, + } +) func TestStudioScenes(t *testing.T) { t.Parallel() - for _, p := range testStudioCases { + tc := testStudioCases + // trailing backslash tests only work where filepath separator is not backslash + if filepath.Separator != '\\' { + tc = append(tc, trailingBackslashStudioCases...) 
+ } + + for _, p := range tc { testStudioScenes(t, p) } } @@ -72,7 +84,7 @@ func testStudioScenes(t *testing.T, tc testStudioCase) { mockSceneReader := &mocks.SceneReaderWriter{} - const studioID = 2 + var studioID = 2 var aliases []string @@ -113,7 +125,7 @@ func testStudioScenes(t *testing.T, tc testStudioCase) { } // if alias provided, then don't find by name - onNameQuery := mockSceneReader.On("Query", scene.QueryOptions(expectedSceneFilter, expectedFindFilter, false)) + onNameQuery := mockSceneReader.On("Query", testCtx, scene.QueryOptions(expectedSceneFilter, expectedFindFilter, false)) if aliasName == "" { onNameQuery.Return(mocks.SceneQueryResult(scenes, len(scenes)), nil).Once() @@ -128,21 +140,19 @@ func testStudioScenes(t *testing.T, tc testStudioCase) { }, } - mockSceneReader.On("Query", scene.QueryOptions(expectedAliasFilter, expectedFindFilter, false)). + mockSceneReader.On("Query", testCtx, scene.QueryOptions(expectedAliasFilter, expectedFindFilter, false)). Return(mocks.SceneQueryResult(scenes, len(scenes)), nil).Once() } for i := range matchingPaths { sceneID := i + 1 - mockSceneReader.On("Find", sceneID).Return(&models.Scene{}, nil).Once() - expectedStudioID := models.NullInt64(studioID) - mockSceneReader.On("Update", models.ScenePartial{ - ID: sceneID, - StudioID: &expectedStudioID, + expectedStudioID := studioID + mockSceneReader.On("UpdatePartial", testCtx, sceneID, models.ScenePartial{ + StudioID: models.NewOptionalInt(expectedStudioID), }).Return(nil, nil).Once() } - err := StudioScenes(&studio, nil, aliases, mockSceneReader, nil) + err := StudioScenes(testCtx, &studio, nil, aliases, mockSceneReader, nil) assert := assert.New(t) @@ -166,7 +176,7 @@ func testStudioImages(t *testing.T, tc testStudioCase) { mockImageReader := &mocks.ImageReaderWriter{} - const studioID = 2 + var studioID = 2 var aliases []string @@ -206,7 +216,7 @@ func testStudioImages(t *testing.T, tc testStudioCase) { } // if alias provided, then don't find by name - onNameQuery := mockImageReader.On("Query", image.QueryOptions(expectedImageFilter, expectedFindFilter, false)) + onNameQuery := mockImageReader.On("Query", testCtx, image.QueryOptions(expectedImageFilter, expectedFindFilter, false)) if aliasName == "" { onNameQuery.Return(mocks.ImageQueryResult(images, len(images)), nil).Once() } else { @@ -220,21 +230,19 @@ func testStudioImages(t *testing.T, tc testStudioCase) { }, } - mockImageReader.On("Query", image.QueryOptions(expectedAliasFilter, expectedFindFilter, false)). + mockImageReader.On("Query", testCtx, image.QueryOptions(expectedAliasFilter, expectedFindFilter, false)). 
Return(mocks.ImageQueryResult(images, len(images)), nil).Once() } for i := range matchingPaths { imageID := i + 1 - mockImageReader.On("Find", imageID).Return(&models.Image{}, nil).Once() - expectedStudioID := models.NullInt64(studioID) - mockImageReader.On("Update", models.ImagePartial{ - ID: imageID, - StudioID: &expectedStudioID, + expectedStudioID := studioID + mockImageReader.On("UpdatePartial", testCtx, imageID, models.ImagePartial{ + StudioID: models.NewOptionalInt(expectedStudioID), }).Return(nil, nil).Once() } - err := StudioImages(&studio, nil, aliases, mockImageReader, nil) + err := StudioImages(testCtx, &studio, nil, aliases, mockImageReader, nil) assert := assert.New(t) @@ -257,7 +265,7 @@ func testStudioGalleries(t *testing.T, tc testStudioCase) { aliasRegex := tc.aliasRegex mockGalleryReader := &mocks.GalleryReaderWriter{} - const studioID = 2 + var studioID = 2 var aliases []string @@ -270,9 +278,10 @@ func testStudioGalleries(t *testing.T, tc testStudioCase) { var galleries []*models.Gallery matchingPaths, falsePaths := generateTestPaths(testPathName, galleryExt) for i, p := range append(matchingPaths, falsePaths...) { + v := p galleries = append(galleries, &models.Gallery{ ID: i + 1, - Path: models.NullString(p), + Path: v, }) } @@ -297,7 +306,7 @@ func testStudioGalleries(t *testing.T, tc testStudioCase) { } // if alias provided, then don't find by name - onNameQuery := mockGalleryReader.On("Query", expectedGalleryFilter, expectedFindFilter) + onNameQuery := mockGalleryReader.On("Query", testCtx, expectedGalleryFilter, expectedFindFilter) if aliasName == "" { onNameQuery.Return(galleries, len(galleries), nil).Once() } else { @@ -311,20 +320,18 @@ func testStudioGalleries(t *testing.T, tc testStudioCase) { }, } - mockGalleryReader.On("Query", expectedAliasFilter, expectedFindFilter).Return(galleries, len(galleries), nil).Once() + mockGalleryReader.On("Query", testCtx, expectedAliasFilter, expectedFindFilter).Return(galleries, len(galleries), nil).Once() } for i := range matchingPaths { galleryID := i + 1 - mockGalleryReader.On("Find", galleryID).Return(&models.Gallery{}, nil).Once() - expectedStudioID := models.NullInt64(studioID) - mockGalleryReader.On("UpdatePartial", models.GalleryPartial{ - ID: galleryID, - StudioID: &expectedStudioID, + expectedStudioID := studioID + mockGalleryReader.On("UpdatePartial", testCtx, galleryID, models.GalleryPartial{ + StudioID: models.NewOptionalInt(expectedStudioID), }).Return(nil, nil).Once() } - err := StudioGalleries(&studio, nil, aliases, mockGalleryReader, nil) + err := StudioGalleries(testCtx, &studio, nil, aliases, mockGalleryReader, nil) assert := assert.New(t) diff --git a/internal/autotag/tag.go b/internal/autotag/tag.go index f0d080871..ab90b62cc 100644 --- a/internal/autotag/tag.go +++ b/internal/autotag/tag.go @@ -1,13 +1,34 @@ package autotag import ( + "context" + "github.com/stashapp/stash/pkg/gallery" "github.com/stashapp/stash/pkg/image" "github.com/stashapp/stash/pkg/match" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/scene" + "github.com/stashapp/stash/pkg/sliceutil/intslice" ) +type SceneQueryTagUpdater interface { + scene.Queryer + models.TagIDLoader + scene.PartialUpdater +} + +type ImageQueryTagUpdater interface { + image.Queryer + models.TagIDLoader + image.PartialUpdater +} + +type GalleryQueryTagUpdater interface { + gallery.Queryer + models.TagIDLoader + gallery.PartialUpdater +} + func getTagTaggers(p *models.Tag, aliases []string, cache *match.Cache) []tagger { ret := []tagger{{ 
ID: p.ID, @@ -29,12 +50,25 @@ func getTagTaggers(p *models.Tag, aliases []string, cache *match.Cache) []tagger } // TagScenes searches for scenes whose path matches the provided tag name and tags the scene with the tag. -func TagScenes(p *models.Tag, paths []string, aliases []string, rw models.SceneReaderWriter, cache *match.Cache) error { +func TagScenes(ctx context.Context, p *models.Tag, paths []string, aliases []string, rw SceneQueryTagUpdater, cache *match.Cache) error { t := getTagTaggers(p, aliases, cache) for _, tt := range t { - if err := tt.tagScenes(paths, rw, func(subjectID, otherID int) (bool, error) { - return scene.AddTag(rw, otherID, subjectID) + if err := tt.tagScenes(ctx, paths, rw, func(o *models.Scene) (bool, error) { + if err := o.LoadTagIDs(ctx, rw); err != nil { + return false, err + } + existing := o.TagIDs.List() + + if intslice.IntInclude(existing, p.ID) { + return false, nil + } + + if err := scene.AddTag(ctx, rw, o, p.ID); err != nil { + return false, err + } + + return true, nil }); err != nil { return err } @@ -43,12 +77,25 @@ func TagScenes(p *models.Tag, paths []string, aliases []string, rw models.SceneR } // TagImages searches for images whose path matches the provided tag name and tags the image with the tag. -func TagImages(p *models.Tag, paths []string, aliases []string, rw models.ImageReaderWriter, cache *match.Cache) error { +func TagImages(ctx context.Context, p *models.Tag, paths []string, aliases []string, rw ImageQueryTagUpdater, cache *match.Cache) error { t := getTagTaggers(p, aliases, cache) for _, tt := range t { - if err := tt.tagImages(paths, rw, func(subjectID, otherID int) (bool, error) { - return image.AddTag(rw, otherID, subjectID) + if err := tt.tagImages(ctx, paths, rw, func(o *models.Image) (bool, error) { + if err := o.LoadTagIDs(ctx, rw); err != nil { + return false, err + } + existing := o.TagIDs.List() + + if intslice.IntInclude(existing, p.ID) { + return false, nil + } + + if err := image.AddTag(ctx, rw, o, p.ID); err != nil { + return false, err + } + + return true, nil }); err != nil { return err } @@ -57,12 +104,25 @@ func TagImages(p *models.Tag, paths []string, aliases []string, rw models.ImageR } // TagGalleries searches for galleries whose path matches the provided tag name and tags the gallery with the tag. 
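// NOTE: every duplicate check in these callbacks goes through
// intslice.IntInclude, a plain linear membership scan. For reference, its
// observed behaviour is equivalent to this sketch:
func intIncludeSketch(vs []int, v int) bool {
	for _, x := range vs {
		if x == v {
			return true
		}
	}
	return false
}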
-func TagGalleries(p *models.Tag, paths []string, aliases []string, rw models.GalleryReaderWriter, cache *match.Cache) error { +func TagGalleries(ctx context.Context, p *models.Tag, paths []string, aliases []string, rw GalleryQueryTagUpdater, cache *match.Cache) error { t := getTagTaggers(p, aliases, cache) for _, tt := range t { - if err := tt.tagGalleries(paths, rw, func(subjectID, otherID int) (bool, error) { - return gallery.AddTag(rw, otherID, subjectID) + if err := tt.tagGalleries(ctx, paths, rw, func(o *models.Gallery) (bool, error) { + if err := o.LoadTagIDs(ctx, rw); err != nil { + return false, err + } + existing := o.TagIDs.List() + + if intslice.IntInclude(existing, p.ID) { + return false, nil + } + + if err := gallery.AddTag(ctx, rw, o, p.ID); err != nil { + return false, err + } + + return true, nil }); err != nil { return err } diff --git a/internal/autotag/tag_test.go b/internal/autotag/tag_test.go index a1eed1eab..e4fe3fa13 100644 --- a/internal/autotag/tag_test.go +++ b/internal/autotag/tag_test.go @@ -1,6 +1,7 @@ package autotag import ( + "path/filepath" "testing" "github.com/stashapp/stash/pkg/image" @@ -17,49 +18,60 @@ type testTagCase struct { aliasRegex string } -var testTagCases = []testTagCase{ - { - "tag name", - `(?i)(?:^|_|[^\p{L}\d])tag[.\-_ ]*name(?:$|_|[^\p{L}\d])`, - "", - "", - }, - { - "tag + name", - `(?i)(?:^|_|[^\p{L}\d])tag[.\-_ ]*\+[.\-_ ]*name(?:$|_|[^\p{L}\d])`, - "", - "", - }, - { - `tag + name\`, - `(?i)(?:^|_|[^\p{L}\d])tag[.\-_ ]*\+[.\-_ ]*name\\(?:$|_|[^\p{L}\d])`, - "", - "", - }, - { - "tag name", - `(?i)(?:^|_|[^\p{L}\d])tag[.\-_ ]*name(?:$|_|[^\p{L}\d])`, - "alias name", - `(?i)(?:^|_|[^\p{L}\d])alias[.\-_ ]*name(?:$|_|[^\p{L}\d])`, - }, - { - "tag + name", - `(?i)(?:^|_|[^\p{L}\d])tag[.\-_ ]*\+[.\-_ ]*name(?:$|_|[^\p{L}\d])`, - "alias + name", - `(?i)(?:^|_|[^\p{L}\d])alias[.\-_ ]*\+[.\-_ ]*name(?:$|_|[^\p{L}\d])`, - }, - { - `tag + name\`, - `(?i)(?:^|_|[^\p{L}\d])tag[.\-_ ]*\+[.\-_ ]*name\\(?:$|_|[^\p{L}\d])`, - `alias + name\`, - `(?i)(?:^|_|[^\p{L}\d])alias[.\-_ ]*\+[.\-_ ]*name\\(?:$|_|[^\p{L}\d])`, - }, -} +var ( + testTagCases = []testTagCase{ + { + "tag name", + `(?i)(?:^|_|[^\p{L}\d])tag[.\-_ ]*name(?:$|_|[^\p{L}\d])`, + "", + "", + }, + { + "tag + name", + `(?i)(?:^|_|[^\p{L}\d])tag[.\-_ ]*\+[.\-_ ]*name(?:$|_|[^\p{L}\d])`, + "", + "", + }, + { + "tag name", + `(?i)(?:^|_|[^\p{L}\d])tag[.\-_ ]*name(?:$|_|[^\p{L}\d])`, + "alias name", + `(?i)(?:^|_|[^\p{L}\d])alias[.\-_ ]*name(?:$|_|[^\p{L}\d])`, + }, + { + "tag + name", + `(?i)(?:^|_|[^\p{L}\d])tag[.\-_ ]*\+[.\-_ ]*name(?:$|_|[^\p{L}\d])`, + "alias + name", + `(?i)(?:^|_|[^\p{L}\d])alias[.\-_ ]*\+[.\-_ ]*name(?:$|_|[^\p{L}\d])`, + }, + } + + trailingBackslashCases = []testTagCase{ + { + `tag + name\`, + `(?i)(?:^|_|[^\p{L}\d])tag[.\-_ ]*\+[.\-_ ]*name\\(?:$|_|[^\p{L}\d])`, + "", + "", + }, + { + `tag + name\`, + `(?i)(?:^|_|[^\p{L}\d])tag[.\-_ ]*\+[.\-_ ]*name\\(?:$|_|[^\p{L}\d])`, + `alias + name\`, + `(?i)(?:^|_|[^\p{L}\d])alias[.\-_ ]*\+[.\-_ ]*name\\(?:$|_|[^\p{L}\d])`, + }, + } +) func TestTagScenes(t *testing.T) { t.Parallel() - for _, p := range testTagCases { + tc := testTagCases + // trailing backslash tests only work where filepath separator is not backslash + if filepath.Separator != '\\' { + tc = append(tc, trailingBackslashCases...) + } + + for _, p := range tc { testTagScenes(t, p) } } @@ -87,8 +99,9 @@ func testTagScenes(t *testing.T, tc testTagCase) { var scenes []*models.Scene for i, p := range append(matchingPaths, falsePaths...) 
{ scenes = append(scenes, &models.Scene{ - ID: i + 1, - Path: p, + ID: i + 1, + Path: p, + TagIDs: models.NewRelatedIDs([]int{}), }) } @@ -113,7 +126,7 @@ func testTagScenes(t *testing.T, tc testTagCase) { } // if alias provided, then don't find by name - onNameQuery := mockSceneReader.On("Query", scene.QueryOptions(expectedSceneFilter, expectedFindFilter, false)) + onNameQuery := mockSceneReader.On("Query", testCtx, scene.QueryOptions(expectedSceneFilter, expectedFindFilter, false)) if aliasName == "" { onNameQuery.Return(mocks.SceneQueryResult(scenes, len(scenes)), nil).Once() } else { @@ -127,17 +140,21 @@ func testTagScenes(t *testing.T, tc testTagCase) { }, } - mockSceneReader.On("Query", scene.QueryOptions(expectedAliasFilter, expectedFindFilter, false)). + mockSceneReader.On("Query", testCtx, scene.QueryOptions(expectedAliasFilter, expectedFindFilter, false)). Return(mocks.SceneQueryResult(scenes, len(scenes)), nil).Once() } for i := range matchingPaths { sceneID := i + 1 - mockSceneReader.On("GetTagIDs", sceneID).Return(nil, nil).Once() - mockSceneReader.On("UpdateTags", sceneID, []int{tagID}).Return(nil).Once() + mockSceneReader.On("UpdatePartial", testCtx, sceneID, models.ScenePartial{ + TagIDs: &models.UpdateIDs{ + IDs: []int{tagID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }).Return(nil, nil).Once() } - err := TagScenes(&tag, nil, aliases, mockSceneReader, nil) + err := TagScenes(testCtx, &tag, nil, aliases, mockSceneReader, nil) assert := assert.New(t) @@ -175,8 +192,9 @@ func testTagImages(t *testing.T, tc testTagCase) { matchingPaths, falsePaths := generateTestPaths(testPathName, "mp4") for i, p := range append(matchingPaths, falsePaths...) { images = append(images, &models.Image{ - ID: i + 1, - Path: p, + ID: i + 1, + Path: p, + TagIDs: models.NewRelatedIDs([]int{}), }) } @@ -201,7 +219,7 @@ func testTagImages(t *testing.T, tc testTagCase) { } // if alias provided, then don't find by name - onNameQuery := mockImageReader.On("Query", image.QueryOptions(expectedImageFilter, expectedFindFilter, false)) + onNameQuery := mockImageReader.On("Query", testCtx, image.QueryOptions(expectedImageFilter, expectedFindFilter, false)) if aliasName == "" { onNameQuery.Return(mocks.ImageQueryResult(images, len(images)), nil).Once() } else { @@ -215,17 +233,22 @@ func testTagImages(t *testing.T, tc testTagCase) { }, } - mockImageReader.On("Query", image.QueryOptions(expectedAliasFilter, expectedFindFilter, false)). + mockImageReader.On("Query", testCtx, image.QueryOptions(expectedAliasFilter, expectedFindFilter, false)). Return(mocks.ImageQueryResult(images, len(images)), nil).Once() } for i := range matchingPaths { imageID := i + 1 - mockImageReader.On("GetTagIDs", imageID).Return(nil, nil).Once() - mockImageReader.On("UpdateTags", imageID, []int{tagID}).Return(nil).Once() + + mockImageReader.On("UpdatePartial", testCtx, imageID, models.ImagePartial{ + TagIDs: &models.UpdateIDs{ + IDs: []int{tagID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }).Return(nil, nil).Once() } - err := TagImages(&tag, nil, aliases, mockImageReader, nil) + err := TagImages(testCtx, &tag, nil, aliases, mockImageReader, nil) assert := assert.New(t) @@ -262,9 +285,11 @@ func testTagGalleries(t *testing.T, tc testTagCase) { var galleries []*models.Gallery matchingPaths, falsePaths := generateTestPaths(testPathName, "mp4") for i, p := range append(matchingPaths, falsePaths...) 
{ + v := p galleries = append(galleries, &models.Gallery{ - ID: i + 1, - Path: models.NullString(p), + ID: i + 1, + Path: v, + TagIDs: models.NewRelatedIDs([]int{}), }) } @@ -289,7 +314,7 @@ func testTagGalleries(t *testing.T, tc testTagCase) { } // if alias provided, then don't find by name - onNameQuery := mockGalleryReader.On("Query", expectedGalleryFilter, expectedFindFilter) + onNameQuery := mockGalleryReader.On("Query", testCtx, expectedGalleryFilter, expectedFindFilter) if aliasName == "" { onNameQuery.Return(galleries, len(galleries), nil).Once() } else { @@ -303,16 +328,22 @@ func testTagGalleries(t *testing.T, tc testTagCase) { }, } - mockGalleryReader.On("Query", expectedAliasFilter, expectedFindFilter).Return(galleries, len(galleries), nil).Once() + mockGalleryReader.On("Query", testCtx, expectedAliasFilter, expectedFindFilter).Return(galleries, len(galleries), nil).Once() } for i := range matchingPaths { galleryID := i + 1 - mockGalleryReader.On("GetTagIDs", galleryID).Return(nil, nil).Once() - mockGalleryReader.On("UpdateTags", galleryID, []int{tagID}).Return(nil).Once() + + mockGalleryReader.On("UpdatePartial", testCtx, galleryID, models.GalleryPartial{ + TagIDs: &models.UpdateIDs{ + IDs: []int{tagID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }).Return(nil, nil).Once() + } - err := TagGalleries(&tag, nil, aliases, mockGalleryReader, nil) + err := TagGalleries(testCtx, &tag, nil, aliases, mockGalleryReader, nil) assert := assert.New(t) diff --git a/internal/autotag/tagger.go b/internal/autotag/tagger.go index 4ea1fbc01..0e53200ec 100644 --- a/internal/autotag/tagger.go +++ b/internal/autotag/tagger.go @@ -14,11 +14,15 @@ package autotag import ( + "context" "fmt" + "github.com/stashapp/stash/pkg/gallery" + "github.com/stashapp/stash/pkg/image" "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/match" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/scene" ) type tagger struct { @@ -32,6 +36,9 @@ type tagger struct { } type addLinkFunc func(subjectID, otherID int) (bool, error) +type addImageLinkFunc func(o *models.Image) (bool, error) +type addGalleryLinkFunc func(o *models.Gallery) (bool, error) +type addSceneLinkFunc func(o *models.Scene) (bool, error) func (t *tagger) addError(otherType, otherName string, err error) error { return fmt.Errorf("error adding %s '%s' to %s '%s': %s", otherType, otherName, t.Type, t.Name, err.Error()) @@ -41,8 +48,8 @@ func (t *tagger) addLog(otherType, otherName string) { logger.Infof("Added %s '%s' to %s '%s'", otherType, otherName, t.Type, t.Name) } -func (t *tagger) tagPerformers(performerReader models.PerformerReader, addFunc addLinkFunc) error { - others, err := match.PathToPerformers(t.Path, performerReader, t.cache, t.trimExt) +func (t *tagger) tagPerformers(ctx context.Context, performerReader match.PerformerAutoTagQueryer, addFunc addLinkFunc) error { + others, err := match.PathToPerformers(ctx, t.Path, performerReader, t.cache, t.trimExt) if err != nil { return err } @@ -62,8 +69,8 @@ func (t *tagger) tagPerformers(performerReader models.PerformerReader, addFunc a return nil } -func (t *tagger) tagStudios(studioReader models.StudioReader, addFunc addLinkFunc) error { - studio, err := match.PathToStudio(t.Path, studioReader, t.cache, t.trimExt) +func (t *tagger) tagStudios(ctx context.Context, studioReader match.StudioAutoTagQueryer, addFunc addLinkFunc) error { + studio, err := match.PathToStudio(ctx, t.Path, studioReader, t.cache, t.trimExt) if err != nil { return err } @@ -83,8 
+90,8 @@ func (t *tagger) tagStudios(studioReader models.StudioReader, addFunc addLinkFun return nil } -func (t *tagger) tagTags(tagReader models.TagReader, addFunc addLinkFunc) error { - others, err := match.PathToTags(t.Path, tagReader, t.cache, t.trimExt) +func (t *tagger) tagTags(ctx context.Context, tagReader match.TagAutoTagQueryer, addFunc addLinkFunc) error { + others, err := match.PathToTags(ctx, t.Path, tagReader, t.cache, t.trimExt) if err != nil { return err } @@ -104,63 +111,63 @@ func (t *tagger) tagTags(tagReader models.TagReader, addFunc addLinkFunc) error return nil } -func (t *tagger) tagScenes(paths []string, sceneReader models.SceneReader, addFunc addLinkFunc) error { - others, err := match.PathToScenes(t.Name, paths, sceneReader) +func (t *tagger) tagScenes(ctx context.Context, paths []string, sceneReader scene.Queryer, addFunc addSceneLinkFunc) error { + others, err := match.PathToScenes(ctx, t.Name, paths, sceneReader) if err != nil { return err } for _, p := range others { - added, err := addFunc(t.ID, p.ID) + added, err := addFunc(p) if err != nil { - return t.addError("scene", p.GetTitle(), err) + return t.addError("scene", p.DisplayName(), err) } if added { - t.addLog("scene", p.GetTitle()) + t.addLog("scene", p.DisplayName()) } } return nil } -func (t *tagger) tagImages(paths []string, imageReader models.ImageReader, addFunc addLinkFunc) error { - others, err := match.PathToImages(t.Name, paths, imageReader) +func (t *tagger) tagImages(ctx context.Context, paths []string, imageReader image.Queryer, addFunc addImageLinkFunc) error { + others, err := match.PathToImages(ctx, t.Name, paths, imageReader) if err != nil { return err } for _, p := range others { - added, err := addFunc(t.ID, p.ID) + added, err := addFunc(p) if err != nil { - return t.addError("image", p.GetTitle(), err) + return t.addError("image", p.DisplayName(), err) } if added { - t.addLog("image", p.GetTitle()) + t.addLog("image", p.DisplayName()) } } return nil } -func (t *tagger) tagGalleries(paths []string, galleryReader models.GalleryReader, addFunc addLinkFunc) error { - others, err := match.PathToGalleries(t.Name, paths, galleryReader) +func (t *tagger) tagGalleries(ctx context.Context, paths []string, galleryReader gallery.Queryer, addFunc addGalleryLinkFunc) error { + others, err := match.PathToGalleries(ctx, t.Name, paths, galleryReader) if err != nil { return err } for _, p := range others { - added, err := addFunc(t.ID, p.ID) + added, err := addFunc(p) if err != nil { - return t.addError("gallery", p.GetTitle(), err) + return t.addError("gallery", p.DisplayName(), err) } if added { - t.addLog("gallery", p.GetTitle()) + t.addLog("gallery", p.DisplayName()) } } diff --git a/internal/desktop/desktop_platform_windows.go b/internal/desktop/desktop_platform_windows.go index 1ff3786a3..ecb4060e6 100644 --- a/internal/desktop/desktop_platform_windows.go +++ b/internal/desktop/desktop_platform_windows.go @@ -28,10 +28,10 @@ func isService() bool { } // Detect if windows golang executable file is running via double click or from cmd/shell terminator -// https://stackoverflow.com/questions/8610489/distinguish-if-program-runs-by-clicking-on-the-icon-typing-its-name-in-the-cons?rq=1 -// https://github.com/shirou/w32/blob/master/kernel32.go -// https://github.com/kbinani/win/blob/master/kernel32.go#L3268 -// win.GetConsoleProcessList(new(uint32), win.DWORD(2)) +// https://stackoverflow.com/questions/8610489/distinguish-if-program-runs-by-clicking-on-the-icon-typing-its-name-in-the-cons?rq=1 +// 
https://github.com/shirou/w32/blob/master/kernel32.go +// https://github.com/kbinani/win/blob/master/kernel32.go#L3268 +// win.GetConsoleProcessList(new(uint32), win.DWORD(2)) // from https://gist.github.com/yougg/213250cc04a52e2b853590b06f49d865 func isDoubleClickLaunched() bool { lp := kernel32.NewProc("GetConsoleProcessList") diff --git a/internal/dlna/cds.go b/internal/dlna/cds.go index 4544b8759..60429e86e 100644 --- a/internal/dlna/cds.go +++ b/internal/dlna/cds.go @@ -41,6 +41,7 @@ import ( "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/scene" "github.com/stashapp/stash/pkg/sliceutil/stringslice" + "github.com/stashapp/stash/pkg/txn" ) var pageSize = 100 @@ -56,7 +57,6 @@ type browse struct { type contentDirectoryService struct { *Server upnp.Eventing - txnManager models.TransactionManager } func formatDurationSexagesimal(d time.Duration) string { @@ -108,9 +108,18 @@ func sceneToContainer(scene *models.Scene, parent string, host string) interface } mimeType := "video/mp4" - size, _ := strconv.Atoi(scene.Size.String) + var ( + size int + bitrate uint + duration int64 + ) - duration := int64(scene.Duration.Float64) + f := scene.Files.Primary() + if f != nil { + size = int(f.Size) + bitrate = uint(f.BitRate) + duration = int64(f.Duration) + } item.Res = append(item.Res, upnpav.Resource{ URL: (&url.URL{ @@ -124,8 +133,7 @@ func sceneToContainer(scene *models.Scene, parent string, host string) interface ProtocolInfo: fmt.Sprintf("http-get:*:%s:%s", mimeType, dlna.ContentFeatures{ SupportRange: true, }.String()), - Bitrate: uint(scene.Bitrate.Int64), - // TODO - make %d:%02d:%02d string + Bitrate: bitrate, Duration: formatDurationSexagesimal(time.Duration(duration) * time.Second), Size: uint64(size), // Resolution: resolution, @@ -352,8 +360,12 @@ func (me *contentDirectoryService) handleBrowseMetadata(obj object, host string) } else { var scene *models.Scene - if err := me.txnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error { - scene, err = r.Scene().Find(sceneID) + if err := txn.WithTxn(context.TODO(), me.txnManager, func(ctx context.Context) error { + scene, err = me.repository.SceneFinder.Find(ctx, sceneID) + if scene != nil { + err = scene.LoadPrimaryFile(ctx, me.repository.FileFinder) + } + if err != nil { return err } @@ -370,7 +382,7 @@ func (me *contentDirectoryService) handleBrowseMetadata(obj object, host string) // http://upnp.org/specs/av/UPnP-av-ContentDirectory-v1-Service.pdf // maximum update ID is 2**32, then rolls back to 0 const maxUpdateID int64 = 1 << 32 - updateID = fmt.Sprint(scene.UpdatedAt.Timestamp.Unix() % maxUpdateID) + updateID = fmt.Sprint(scene.UpdatedAt.Unix() % maxUpdateID) } else { return nil, upnp.Errorf(upnpav.NoSuchObjectErrorCode, "scene not found") } @@ -431,14 +443,14 @@ func getRootObjects() []interface{} { func (me *contentDirectoryService) getVideos(sceneFilter *models.SceneFilterType, parentID string, host string) []interface{} { var objs []interface{} - if err := me.txnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error { + if err := txn.WithTxn(context.TODO(), me.txnManager, func(ctx context.Context) error { sort := "title" findFilter := &models.FindFilterType{ PerPage: &pageSize, Sort: &sort, } - scenes, total, err := scene.QueryWithCount(r.Scene(), sceneFilter, findFilter) + scenes, total, err := scene.QueryWithCount(ctx, me.repository.SceneFinder, sceneFilter, findFilter) if err != nil { return err } @@ -449,7 +461,7 @@ func (me *contentDirectoryService) 
getVideos(sceneFilter *models.SceneFilterType parentID: parentID, } - objs, err = pager.getPages(r, total) + objs, err = pager.getPages(ctx, me.repository.SceneFinder, total) if err != nil { return err } @@ -470,14 +482,14 @@ func (me *contentDirectoryService) getVideos(sceneFilter *models.SceneFilterType func (me *contentDirectoryService) getPageVideos(sceneFilter *models.SceneFilterType, parentID string, page int, host string) []interface{} { var objs []interface{} - if err := me.txnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error { + if err := txn.WithTxn(context.TODO(), me.txnManager, func(ctx context.Context) error { pager := scenePager{ sceneFilter: sceneFilter, parentID: parentID, } var err error - objs, err = pager.getPageVideos(r, page, host) + objs, err = pager.getPageVideos(ctx, me.repository.SceneFinder, page, host) if err != nil { return err } @@ -511,8 +523,8 @@ func (me *contentDirectoryService) getAllScenes(host string) []interface{} { func (me *contentDirectoryService) getStudios() []interface{} { var objs []interface{} - if err := me.txnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error { - studios, err := r.Studio().All() + if err := txn.WithTxn(context.TODO(), me.txnManager, func(ctx context.Context) error { + studios, err := me.repository.StudioFinder.All(ctx) if err != nil { return err } @@ -550,8 +562,8 @@ func (me *contentDirectoryService) getStudioScenes(paths []string, host string) func (me *contentDirectoryService) getTags() []interface{} { var objs []interface{} - if err := me.txnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error { - tags, err := r.Tag().All() + if err := txn.WithTxn(context.TODO(), me.txnManager, func(ctx context.Context) error { + tags, err := me.repository.TagFinder.All(ctx) if err != nil { return err } @@ -589,8 +601,8 @@ func (me *contentDirectoryService) getTagScenes(paths []string, host string) []i func (me *contentDirectoryService) getPerformers() []interface{} { var objs []interface{} - if err := me.txnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error { - performers, err := r.Performer().All() + if err := txn.WithTxn(context.TODO(), me.txnManager, func(ctx context.Context) error { + performers, err := me.repository.PerformerFinder.All(ctx) if err != nil { return err } @@ -628,8 +640,8 @@ func (me *contentDirectoryService) getPerformerScenes(paths []string, host strin func (me *contentDirectoryService) getMovies() []interface{} { var objs []interface{} - if err := me.txnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error { - movies, err := r.Movie().All() + if err := txn.WithTxn(context.TODO(), me.txnManager, func(ctx context.Context) error { + movies, err := me.repository.MovieFinder.All(ctx) if err != nil { return err } diff --git a/internal/dlna/cds_test.go b/internal/dlna/cds_test.go index b52ca3b88..592f8f818 100644 --- a/internal/dlna/cds_test.go +++ b/internal/dlna/cds_test.go @@ -31,7 +31,6 @@ import ( "strings" "testing" - "github.com/stashapp/stash/pkg/models/mocks" "github.com/stretchr/testify/assert" ) @@ -59,8 +58,7 @@ func TestRootParentObjectID(t *testing.T) { func testHandleBrowse(argsXML string) (map[string]string, error) { cds := contentDirectoryService{ - Server: &Server{}, - txnManager: mocks.NewTransactionManager(), + Server: &Server{}, } r := &http.Request{} diff --git a/internal/dlna/dms.go b/internal/dlna/dms.go index a1ea8ceac..d5e7cc84e 100644 --- a/internal/dlna/dms.go +++ b/internal/dlna/dms.go 
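
The cds.go hunks above replace the `WithReadTxn` repository-callback style with `txn.WithTxn`, which passes only a transaction-scoped context; reads then go through the narrow finder interfaces held on the server's `Repository`. A minimal sketch of the resulting calling pattern, assuming the `Repository`, `SceneFinder`, and `txn.Manager` types from this diff — `findSceneTitle` itself is illustrative, not part of the change:

```go
package dlna

import (
	"context"
	"fmt"

	"github.com/stashapp/stash/pkg/txn"
)

// findSceneTitle sketches the transaction pattern adopted in cds.go above:
// txn.WithTxn supplies a transaction-scoped context, and data access goes
// through the SceneFinder interface instead of a repository callback.
func findSceneTitle(ctx context.Context, mgr txn.Manager, repo Repository, id int) (string, error) {
	var title string
	err := txn.WithTxn(ctx, mgr, func(ctx context.Context) error {
		s, err := repo.SceneFinder.Find(ctx, id)
		if err != nil {
			return err
		}
		if s == nil {
			return fmt.Errorf("scene %d not found", id)
		}
		title = s.Title
		return nil
	})
	return title, err
}
```

The effect is that callers compile against small, purpose-built interfaces rather than the monolithic `models.ReaderRepository`.
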
@@ -48,8 +48,31 @@ import ( "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/scene" + "github.com/stashapp/stash/pkg/txn" ) +type SceneFinder interface { + scene.Queryer + scene.IDFinder +} + +type StudioFinder interface { + All(ctx context.Context) ([]*models.Studio, error) +} + +type TagFinder interface { + All(ctx context.Context) ([]*models.Tag, error) +} + +type PerformerFinder interface { + All(ctx context.Context) ([]*models.Performer, error) +} + +type MovieFinder interface { + All(ctx context.Context) ([]*models.Movie, error) +} + const ( serverField = "Linux/3.4 DLNADOC/1.50 UPnP/1.0 DMS/1.0" rootDeviceType = "urn:schemas-upnp-org:device:MediaServer:1" @@ -249,7 +272,8 @@ type Server struct { // Time interval between SSPD announces NotifyInterval time.Duration - txnManager models.TransactionManager + txnManager txn.Manager + repository Repository sceneServer sceneServer ipWhitelistManager *ipWhitelistManager } @@ -415,12 +439,12 @@ func (me *Server) serveIcon(w http.ResponseWriter, r *http.Request) { } var scene *models.Scene - err := me.txnManager.WithReadTxn(r.Context(), func(r models.ReaderRepository) error { + err := txn.WithTxn(r.Context(), me.txnManager, func(ctx context.Context) error { idInt, err := strconv.Atoi(sceneId) if err != nil { return nil } - scene, _ = r.Scene().Find(idInt) + scene, _ = me.repository.SceneFinder.Find(ctx, idInt) return nil }) if err != nil { @@ -555,12 +579,12 @@ func (me *Server) initMux(mux *http.ServeMux) { mux.HandleFunc(resPath, func(w http.ResponseWriter, r *http.Request) { sceneId := r.URL.Query().Get("scene") var scene *models.Scene - err := me.txnManager.WithReadTxn(r.Context(), func(r models.ReaderRepository) error { + err := txn.WithTxn(r.Context(), me.txnManager, func(ctx context.Context) error { sceneIdInt, err := strconv.Atoi(sceneId) if err != nil { return nil } - scene, _ = r.Scene().Find(sceneIdInt) + scene, _ = me.repository.SceneFinder.Find(ctx, sceneIdInt) return nil }) if err != nil { @@ -595,8 +619,7 @@ func (me *Server) initMux(mux *http.ServeMux) { func (me *Server) initServices() { me.services = map[string]UPnPService{ "ContentDirectory": &contentDirectoryService{ - Server: me, - txnManager: me.txnManager, + Server: me, }, "ConnectionManager": &connectionManagerService{ Server: me, diff --git a/internal/dlna/paging.go b/internal/dlna/paging.go index 6f2afda8e..e5f65f96a 100644 --- a/internal/dlna/paging.go +++ b/internal/dlna/paging.go @@ -1,6 +1,7 @@ package dlna import ( + "context" "fmt" "math" "strconv" @@ -18,7 +19,7 @@ func (p *scenePager) getPageID(page int) string { return p.parentID + "/page/" + strconv.Itoa(page) } -func (p *scenePager) getPages(r models.ReaderRepository, total int) ([]interface{}, error) { +func (p *scenePager) getPages(ctx context.Context, r scene.Queryer, total int) ([]interface{}, error) { var objs []interface{} // get the first scene of each page to set an appropriate title @@ -37,7 +38,7 @@ func (p *scenePager) getPages(r models.ReaderRepository, total int) ([]interface if pages <= 10 || (page-1)%(pages/10) == 0 { thisPage := ((page - 1) * pageSize) + 1 findFilter.Page = &thisPage - scenes, err := scene.Query(r.Scene(), p.sceneFilter, findFilter) + scenes, err := scene.Query(ctx, r, p.sceneFilter, findFilter) if err != nil { return nil, err } @@ -58,7 +59,7 @@ func (p *scenePager) getPages(r models.ReaderRepository, total int) ([]interface return objs, nil } -func (p *scenePager) getPageVideos(r models.ReaderRepository, page 
int, host string) ([]interface{}, error) { +func (p *scenePager) getPageVideos(ctx context.Context, r SceneFinder, page int, host string) ([]interface{}, error) { var objs []interface{} sort := "title" @@ -68,7 +69,7 @@ func (p *scenePager) getPageVideos(r models.ReaderRepository, page int, host str Sort: &sort, } - scenes, err := scene.Query(r.Scene(), p.sceneFilter, findFilter) + scenes, err := scene.Query(ctx, r, p.sceneFilter, findFilter) if err != nil { return nil, err } diff --git a/internal/dlna/service.go b/internal/dlna/service.go index 6c14e8ee5..a257b7f94 100644 --- a/internal/dlna/service.go +++ b/internal/dlna/service.go @@ -8,10 +8,35 @@ import ( "sync" "time" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/txn" ) +type Repository struct { + SceneFinder SceneFinder + FileFinder file.Finder + StudioFinder StudioFinder + TagFinder TagFinder + PerformerFinder PerformerFinder + MovieFinder MovieFinder +} + +type Status struct { + Running bool `json:"running"` + // If not currently running, time until it will be started. If running, time until it will be stopped + Until *time.Time `json:"until"` + RecentIPAddresses []string `json:"recentIPAddresses"` + AllowedIPAddresses []*Dlnaip `json:"allowedIPAddresses"` +} + +type Dlnaip struct { + IPAddress string `json:"ipAddress"` + // Time until IP will be no longer allowed/disallowed + Until *time.Time `json:"until"` +} + type dmsConfig struct { Path string IfNames []string @@ -34,7 +59,8 @@ type Config interface { } type Service struct { - txnManager models.TransactionManager + txnManager txn.Manager + repository Repository config Config sceneServer sceneServer ipWhitelistMgr *ipWhitelistManager @@ -107,6 +133,7 @@ func (s *Service) init() error { s.server = &Server{ txnManager: s.txnManager, sceneServer: s.sceneServer, + repository: s.repository, ipWhitelistManager: s.ipWhitelistMgr, Interfaces: interfaces, HTTPConn: func() net.Listener { @@ -167,9 +194,10 @@ func (s *Service) init() error { // } // NewService initialises and returns a new DLNA service. 
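
The service.go hunks above move `Status` and `Dlnaip` into the dlna package, replacing `models.DLNAStatus` and `models.Dlnaip` so the package no longer depends on the generated GraphQL model types for status reporting. A small construction sketch; every field value here is invented for illustration:

```go
package dlna

import "time"

// exampleStatus builds the package-local Status value that Status() now
// returns instead of *models.DLNAStatus. All field values are examples.
func exampleStatus() *Status {
	until := time.Now().Add(30 * time.Minute)
	return &Status{
		Running:            true,
		Until:              &until,
		RecentIPAddresses:  []string{"192.168.1.10"},
		AllowedIPAddresses: []*Dlnaip{{IPAddress: "192.168.1.10", Until: &until}},
	}
}
```
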
-func NewService(txnManager models.TransactionManager, cfg Config, sceneServer sceneServer) *Service { +func NewService(txnManager txn.Manager, repo Repository, cfg Config, sceneServer sceneServer) *Service { ret := &Service{ txnManager: txnManager, + repository: repo, sceneServer: sceneServer, config: cfg, ipWhitelistMgr: &ipWhitelistManager{ @@ -273,11 +301,11 @@ func (s *Service) IsRunning() bool { return s.running } -func (s *Service) Status() *models.DLNAStatus { +func (s *Service) Status() *Status { s.mutex.Lock() defer s.mutex.Unlock() - ret := &models.DLNAStatus{ + ret := &Status{ Running: s.running, RecentIPAddresses: s.ipWhitelistMgr.getRecent(), AllowedIPAddresses: s.ipWhitelistMgr.getTempAllowed(), diff --git a/internal/dlna/whitelist.go b/internal/dlna/whitelist.go index 447e68702..4bf14b20e 100644 --- a/internal/dlna/whitelist.go +++ b/internal/dlna/whitelist.go @@ -4,7 +4,6 @@ import ( "sync" "time" - "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/sliceutil/stringslice" ) @@ -59,11 +58,11 @@ func (m *ipWhitelistManager) getRecent() []string { return m.recentIPAddresses } -func (m *ipWhitelistManager) getTempAllowed() []*models.Dlnaip { +func (m *ipWhitelistManager) getTempAllowed() []*Dlnaip { m.mutex.Lock() defer m.mutex.Unlock() - var ret []*models.Dlnaip + var ret []*Dlnaip now := time.Now() removeExpired := false @@ -73,7 +72,7 @@ func (m *ipWhitelistManager) getTempAllowed() []*models.Dlnaip { continue } - ret = append(ret, &models.Dlnaip{ + ret = append(ret, &Dlnaip{ IPAddress: a.pattern, Until: a.until, }) diff --git a/internal/identify/identify.go b/internal/identify/identify.go index d57670465..98bcaa34e 100644 --- a/internal/identify/identify.go +++ b/internal/identify/identify.go @@ -2,17 +2,18 @@ package identify import ( "context" - "database/sql" "fmt" "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/scene" + "github.com/stashapp/stash/pkg/scraper" + "github.com/stashapp/stash/pkg/txn" "github.com/stashapp/stash/pkg/utils" ) type SceneScraper interface { - ScrapeScene(ctx context.Context, sceneID int) (*models.ScrapedScene, error) + ScrapeScene(ctx context.Context, sceneID int) (*scraper.ScrapedScene, error) } type SceneUpdatePostHookExecutor interface { @@ -21,19 +22,24 @@ type SceneUpdatePostHookExecutor interface { type ScraperSource struct { Name string - Options *models.IdentifyMetadataOptionsInput + Options *MetadataOptions Scraper SceneScraper RemoteSite string } type SceneIdentifier struct { - DefaultOptions *models.IdentifyMetadataOptionsInput + SceneReaderUpdater SceneReaderUpdater + StudioCreator StudioCreator + PerformerCreator PerformerCreator + TagCreator TagCreator + + DefaultOptions *MetadataOptions Sources []ScraperSource ScreenshotSetter scene.ScreenshotSetter SceneUpdatePostHookExecutor SceneUpdatePostHookExecutor } -func (t *SceneIdentifier) Identify(ctx context.Context, txnManager models.TransactionManager, scene *models.Scene) error { +func (t *SceneIdentifier) Identify(ctx context.Context, txnManager txn.Manager, scene *models.Scene) error { result, err := t.scrapeScene(ctx, scene) if err != nil { return err @@ -53,7 +59,7 @@ func (t *SceneIdentifier) Identify(ctx context.Context, txnManager models.Transa } type scrapeResult struct { - result *models.ScrapedScene + result *scraper.ScrapedScene source ScraperSource } @@ -79,12 +85,12 @@ func (t *SceneIdentifier) scrapeScene(ctx context.Context, scene *models.Scene) return nil, nil } -func (t 
*SceneIdentifier) getSceneUpdater(ctx context.Context, s *models.Scene, result *scrapeResult, repo models.Repository) (*scene.UpdateSet, error) { +func (t *SceneIdentifier) getSceneUpdater(ctx context.Context, s *models.Scene, result *scrapeResult) (*scene.UpdateSet, error) { ret := &scene.UpdateSet{ ID: s.ID, } - options := []models.IdentifyMetadataOptionsInput{} + options := []MetadataOptions{} if result.source.Options != nil { options = append(options, *result.source.Options) } @@ -105,24 +111,24 @@ func (t *SceneIdentifier) getSceneUpdater(ctx context.Context, s *models.Scene, scraped := result.result rel := sceneRelationships{ - repo: repo, - scene: s, - result: result, - fieldOptions: fieldOptions, + sceneReader: t.SceneReaderUpdater, + studioCreator: t.StudioCreator, + performerCreator: t.PerformerCreator, + tagCreator: t.TagCreator, + scene: s, + result: result, + fieldOptions: fieldOptions, } ret.Partial = getScenePartial(s, scraped, fieldOptions, setOrganized) - studioID, err := rel.studio() + studioID, err := rel.studio(ctx) if err != nil { return nil, fmt.Errorf("error getting studio: %w", err) } if studioID != nil { - ret.Partial.StudioID = &sql.NullInt64{ - Int64: *studioID, - Valid: true, - } + ret.Partial.StudioID = models.NewOptionalInt(*studioID) } ignoreMale := false @@ -133,20 +139,38 @@ func (t *SceneIdentifier) getSceneUpdater(ctx context.Context, s *models.Scene, } } - ret.PerformerIDs, err = rel.performers(ignoreMale) + performerIDs, err := rel.performers(ctx, ignoreMale) if err != nil { return nil, err } + if performerIDs != nil { + ret.Partial.PerformerIDs = &models.UpdateIDs{ + IDs: performerIDs, + Mode: models.RelationshipUpdateModeSet, + } + } - ret.TagIDs, err = rel.tags() + tagIDs, err := rel.tags(ctx) if err != nil { return nil, err } + if tagIDs != nil { + ret.Partial.TagIDs = &models.UpdateIDs{ + IDs: tagIDs, + Mode: models.RelationshipUpdateModeSet, + } + } - ret.StashIDs, err = rel.stashIDs() + stashIDs, err := rel.stashIDs(ctx) if err != nil { return nil, err } + if stashIDs != nil { + ret.Partial.StashIDs = &models.UpdateStashIDs{ + StashIDs: stashIDs, + Mode: models.RelationshipUpdateModeSet, + } + } setCoverImage := false for _, o := range options { @@ -166,11 +190,22 @@ func (t *SceneIdentifier) getSceneUpdater(ctx context.Context, s *models.Scene, return ret, nil } -func (t *SceneIdentifier) modifyScene(ctx context.Context, txnManager models.TransactionManager, s *models.Scene, result *scrapeResult) error { +func (t *SceneIdentifier) modifyScene(ctx context.Context, txnManager txn.Manager, s *models.Scene, result *scrapeResult) error { var updater *scene.UpdateSet - if err := txnManager.WithTxn(ctx, func(repo models.Repository) error { + if err := txn.WithTxn(ctx, txnManager, func(ctx context.Context) error { + // load scene relationships + if err := s.LoadPerformerIDs(ctx, t.SceneReaderUpdater); err != nil { + return err + } + if err := s.LoadTagIDs(ctx, t.SceneReaderUpdater); err != nil { + return err + } + if err := s.LoadStashIDs(ctx, t.SceneReaderUpdater); err != nil { + return err + } + var err error - updater, err = t.getSceneUpdater(ctx, s, result, repo) + updater, err = t.getSceneUpdater(ctx, s, result) if err != nil { return err } @@ -181,15 +216,14 @@ func (t *SceneIdentifier) modifyScene(ctx context.Context, txnManager models.Tra return nil } - _, err = updater.Update(repo.Scene(), t.ScreenshotSetter) - if err != nil { + if _, err := updater.Update(ctx, t.SceneReaderUpdater, t.ScreenshotSetter); err != nil { return fmt.Errorf("error 
updating scene: %w", err) } as := "" title := updater.Partial.Title - if title != nil { - as = fmt.Sprintf(" as %s", title.String) + if title.Ptr() != nil { + as = fmt.Sprintf(" as %s", title.Value) } logger.Infof("Successfully identified %s%s using %s", s.Path, as, result.source.Name) @@ -208,9 +242,9 @@ func (t *SceneIdentifier) modifyScene(ctx context.Context, txnManager models.Tra return nil } -func getFieldOptions(options []models.IdentifyMetadataOptionsInput) map[string]*models.IdentifyFieldOptionsInput { +func getFieldOptions(options []MetadataOptions) map[string]*FieldOptions { // prefer source-specific field strategies, then the defaults - ret := make(map[string]*models.IdentifyFieldOptionsInput) + ret := make(map[string]*FieldOptions) for _, oo := range options { for _, f := range oo.FieldOptions { if _, found := ret[f.Field]; !found { @@ -222,54 +256,50 @@ func getFieldOptions(options []models.IdentifyMetadataOptionsInput) map[string]* return ret } -func getScenePartial(scene *models.Scene, scraped *models.ScrapedScene, fieldOptions map[string]*models.IdentifyFieldOptionsInput, setOrganized bool) models.ScenePartial { - partial := models.ScenePartial{ - ID: scene.ID, - } +func getScenePartial(scene *models.Scene, scraped *scraper.ScrapedScene, fieldOptions map[string]*FieldOptions, setOrganized bool) models.ScenePartial { + partial := models.ScenePartial{} - if scraped.Title != nil && scene.Title.String != *scraped.Title { - if shouldSetSingleValueField(fieldOptions["title"], scene.Title.String != "") { - partial.Title = models.NullStringPtr(*scraped.Title) + if scraped.Title != nil && (scene.Title != *scraped.Title) { + if shouldSetSingleValueField(fieldOptions["title"], scene.Title != "") { + partial.Title = models.NewOptionalString(*scraped.Title) } } - if scraped.Date != nil && scene.Date.String != *scraped.Date { - if shouldSetSingleValueField(fieldOptions["date"], scene.Date.Valid) { - partial.Date = &models.SQLiteDate{ - String: *scraped.Date, - Valid: true, - } + if scraped.Date != nil && (scene.Date == nil || scene.Date.String() != *scraped.Date) { + if shouldSetSingleValueField(fieldOptions["date"], scene.Date != nil) { + d := models.NewDate(*scraped.Date) + partial.Date = models.NewOptionalDate(d) } } - if scraped.Details != nil && scene.Details.String != *scraped.Details { - if shouldSetSingleValueField(fieldOptions["details"], scene.Details.String != "") { - partial.Details = models.NullStringPtr(*scraped.Details) + if scraped.Details != nil && (scene.Details != *scraped.Details) { + if shouldSetSingleValueField(fieldOptions["details"], scene.Details != "") { + partial.Details = models.NewOptionalString(*scraped.Details) } } - if scraped.URL != nil && scene.URL.String != *scraped.URL { - if shouldSetSingleValueField(fieldOptions["url"], scene.URL.String != "") { - partial.URL = models.NullStringPtr(*scraped.URL) + if scraped.URL != nil && (scene.URL != *scraped.URL) { + if shouldSetSingleValueField(fieldOptions["url"], scene.URL != "") { + partial.URL = models.NewOptionalString(*scraped.URL) } } if setOrganized && !scene.Organized { // just reuse the boolean since we know it's true - partial.Organized = &setOrganized + partial.Organized = models.NewOptionalBool(setOrganized) } return partial } -func shouldSetSingleValueField(strategy *models.IdentifyFieldOptionsInput, hasExistingValue bool) bool { +func shouldSetSingleValueField(strategy *FieldOptions, hasExistingValue bool) bool { // if unset then default to MERGE - fs := models.IdentifyFieldStrategyMerge + fs := 
FieldStrategyMerge if strategy != nil && strategy.Strategy.IsValid() { fs = strategy.Strategy } - if fs == models.IdentifyFieldStrategyIgnore { + if fs == FieldStrategyIgnore { return false } - return !hasExistingValue || fs == models.IdentifyFieldStrategyOverwrite + return !hasExistingValue || fs == FieldStrategyOverwrite } diff --git a/internal/identify/identify_test.go b/internal/identify/identify_test.go index 3a36015f4..a9395265d 100644 --- a/internal/identify/identify_test.go +++ b/internal/identify/identify_test.go @@ -8,16 +8,19 @@ import ( "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models/mocks" + "github.com/stashapp/stash/pkg/scraper" "github.com/stashapp/stash/pkg/sliceutil/intslice" "github.com/stretchr/testify/mock" ) +var testCtx = context.Background() + type mockSceneScraper struct { errIDs []int - results map[int]*models.ScrapedScene + results map[int]*scraper.ScrapedScene } -func (s mockSceneScraper) ScrapeScene(ctx context.Context, sceneID int) (*models.ScrapedScene, error) { +func (s mockSceneScraper) ScrapeScene(ctx context.Context, sceneID int) (*scraper.ScrapedScene, error) { if intslice.IntInclude(s.errIDs, sceneID) { return nil, errors.New("scrape scene error") } @@ -42,12 +45,12 @@ func TestSceneIdentifier_Identify(t *testing.T) { var scrapedTitle = "scrapedTitle" - defaultOptions := &models.IdentifyMetadataOptionsInput{} + defaultOptions := &MetadataOptions{} sources := []ScraperSource{ { Scraper: mockSceneScraper{ errIDs: []int{errID1}, - results: map[int]*models.ScrapedScene{ + results: map[int]*scraper.ScrapedScene{ found1ID: { Title: &scrapedTitle, }, @@ -57,7 +60,7 @@ func TestSceneIdentifier_Identify(t *testing.T) { { Scraper: mockSceneScraper{ errIDs: []int{errID2}, - results: map[int]*models.ScrapedScene{ + results: map[int]*scraper.ScrapedScene{ found2ID: { Title: &scrapedTitle, }, @@ -69,13 +72,14 @@ func TestSceneIdentifier_Identify(t *testing.T) { }, } - repo := mocks.NewTransactionManager() - repo.Scene().(*mocks.SceneReaderWriter).On("Update", mock.MatchedBy(func(partial models.ScenePartial) bool { - return partial.ID != errUpdateID - })).Return(nil, nil) - repo.Scene().(*mocks.SceneReaderWriter).On("Update", mock.MatchedBy(func(partial models.ScenePartial) bool { - return partial.ID == errUpdateID - })).Return(nil, errors.New("update error")) + mockSceneReaderWriter := &mocks.SceneReaderWriter{} + + mockSceneReaderWriter.On("UpdatePartial", testCtx, mock.MatchedBy(func(id int) bool { + return id == errUpdateID + }), mock.Anything).Return(nil, errors.New("update error")) + mockSceneReaderWriter.On("UpdatePartial", testCtx, mock.MatchedBy(func(id int) bool { + return id != errUpdateID + }), mock.Anything).Return(nil, nil) tests := []struct { name string @@ -115,6 +119,7 @@ func TestSceneIdentifier_Identify(t *testing.T) { } identifier := SceneIdentifier{ + SceneReaderUpdater: mockSceneReaderWriter, DefaultOptions: defaultOptions, Sources: sources, SceneUpdatePostHookExecutor: mockHookExecutor{}, @@ -123,9 +128,12 @@ func TestSceneIdentifier_Identify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { scene := &models.Scene{ - ID: tt.sceneID, + ID: tt.sceneID, + PerformerIDs: models.NewRelatedIDs([]int{}), + TagIDs: models.NewRelatedIDs([]int{}), + StashIDs: models.NewRelatedStashIDs([]models.StashID{}), } - if err := identifier.Identify(context.TODO(), repo, scene); (err != nil) != tt.wantErr { + if err := identifier.Identify(testCtx, &mocks.TxnManager{}, scene); (err != nil) != tt.wantErr { 
t.Errorf("SceneIdentifier.Identify() error = %v, wantErr %v", err, tt.wantErr) } }) @@ -133,7 +141,9 @@ func TestSceneIdentifier_Identify(t *testing.T) { } func TestSceneIdentifier_modifyScene(t *testing.T) { - repo := mocks.NewTransactionManager() + repo := models.Repository{ + TxnManager: &mocks.TxnManager{}, + } tr := &SceneIdentifier{} type args struct { @@ -148,9 +158,13 @@ func TestSceneIdentifier_modifyScene(t *testing.T) { { "empty update", args{ - &models.Scene{}, + &models.Scene{ + PerformerIDs: models.NewRelatedIDs([]int{}), + TagIDs: models.NewRelatedIDs([]int{}), + StashIDs: models.NewRelatedStashIDs([]models.StashID{}), + }, &scrapeResult{ - result: &models.ScrapedScene{}, + result: &scraper.ScrapedScene{}, }, }, false, @@ -158,7 +172,7 @@ func TestSceneIdentifier_modifyScene(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if err := tr.modifyScene(context.TODO(), repo, tt.args.scene, tt.args.result); (err != nil) != tt.wantErr { + if err := tr.modifyScene(testCtx, repo, tt.args.scene, tt.args.result); (err != nil) != tt.wantErr { t.Errorf("SceneIdentifier.modifyScene() error = %v, wantErr %v", err, tt.wantErr) } }) @@ -173,55 +187,55 @@ func Test_getFieldOptions(t *testing.T) { ) type args struct { - options []models.IdentifyMetadataOptionsInput + options []MetadataOptions } tests := []struct { name string args args - want map[string]*models.IdentifyFieldOptionsInput + want map[string]*FieldOptions }{ { "simple", args{ - []models.IdentifyMetadataOptionsInput{ + []MetadataOptions{ { - FieldOptions: []*models.IdentifyFieldOptionsInput{ + FieldOptions: []*FieldOptions{ { Field: inFirst, - Strategy: models.IdentifyFieldStrategyIgnore, + Strategy: FieldStrategyIgnore, }, { Field: inBoth, - Strategy: models.IdentifyFieldStrategyIgnore, + Strategy: FieldStrategyIgnore, }, }, }, { - FieldOptions: []*models.IdentifyFieldOptionsInput{ + FieldOptions: []*FieldOptions{ { Field: inSecond, - Strategy: models.IdentifyFieldStrategyMerge, + Strategy: FieldStrategyMerge, }, { Field: inBoth, - Strategy: models.IdentifyFieldStrategyMerge, + Strategy: FieldStrategyMerge, }, }, }, }, }, - map[string]*models.IdentifyFieldOptionsInput{ + map[string]*FieldOptions{ inFirst: { Field: inFirst, - Strategy: models.IdentifyFieldStrategyIgnore, + Strategy: FieldStrategyIgnore, }, inSecond: { Field: inSecond, - Strategy: models.IdentifyFieldStrategyMerge, + Strategy: FieldStrategyMerge, }, inBoth: { Field: inBoth, - Strategy: models.IdentifyFieldStrategyIgnore, + Strategy: FieldStrategyIgnore, }, }, }, @@ -238,26 +252,26 @@ func Test_getFieldOptions(t *testing.T) { func Test_getScenePartial(t *testing.T) { var ( originalTitle = "originalTitle" - originalDate = "originalDate" + originalDate = "2001-01-01" originalDetails = "originalDetails" originalURL = "originalURL" ) var ( scrapedTitle = "scrapedTitle" - scrapedDate = "scrapedDate" + scrapedDate = "2002-02-02" scrapedDetails = "scrapedDetails" scrapedURL = "scrapedURL" ) + originalDateObj := models.NewDate(originalDate) + scrapedDateObj := models.NewDate(scrapedDate) + originalScene := &models.Scene{ - Title: models.NullString(originalTitle), - Date: models.SQLiteDate{ - String: originalDate, - Valid: true, - }, - Details: models.NullString(originalDetails), - URL: models.NullString(originalURL), + Title: originalTitle, + Date: &originalDateObj, + Details: originalDetails, + URL: originalURL, } organisedScene := *originalScene @@ -266,31 +280,28 @@ func Test_getScenePartial(t *testing.T) { emptyScene := &models.Scene{} 
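
A recurring shape in the identify and autotag hunks above: relationship changes now ride inside the partial as `models.UpdateIDs` with an explicit mode, replacing the old `GetTagIDs`/`UpdateTags` round trip. A hedged sketch contrasting the two modes this diff uses — identify sets the whole list, autotag adds to it; the IDs are placeholders:

```go
package identify

import "github.com/stashapp/stash/pkg/models"

// examplePartials contrasts the two relationship update modes used in this
// diff: Set replaces the stored list, Add appends to it. IDs are examples.
func examplePartials() (set, add models.ScenePartial) {
	set.TagIDs = &models.UpdateIDs{
		IDs:  []int{1, 2, 3},
		Mode: models.RelationshipUpdateModeSet,
	}
	add.TagIDs = &models.UpdateIDs{
		IDs:  []int{4},
		Mode: models.RelationshipUpdateModeAdd,
	}
	return set, add
}
```
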
postPartial := models.ScenePartial{ - Title: models.NullStringPtr(scrapedTitle), - Date: &models.SQLiteDate{ - String: scrapedDate, - Valid: true, - }, - Details: models.NullStringPtr(scrapedDetails), - URL: models.NullStringPtr(scrapedURL), + Title: models.NewOptionalString(scrapedTitle), + Date: models.NewOptionalDate(scrapedDateObj), + Details: models.NewOptionalString(scrapedDetails), + URL: models.NewOptionalString(scrapedURL), } - scrapedScene := &models.ScrapedScene{ + scrapedScene := &scraper.ScrapedScene{ Title: &scrapedTitle, Date: &scrapedDate, Details: &scrapedDetails, URL: &scrapedURL, } - scrapedUnchangedScene := &models.ScrapedScene{ + scrapedUnchangedScene := &scraper.ScrapedScene{ Title: &originalTitle, Date: &originalDate, Details: &originalDetails, URL: &originalURL, } - makeFieldOptions := func(input *models.IdentifyFieldOptionsInput) map[string]*models.IdentifyFieldOptionsInput { - return map[string]*models.IdentifyFieldOptionsInput{ + makeFieldOptions := func(input *FieldOptions) map[string]*FieldOptions { + return map[string]*FieldOptions{ "title": input, "date": input, "details": input, @@ -298,22 +309,22 @@ func Test_getScenePartial(t *testing.T) { } } - overwriteAll := makeFieldOptions(&models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyOverwrite, + overwriteAll := makeFieldOptions(&FieldOptions{ + Strategy: FieldStrategyOverwrite, }) - ignoreAll := makeFieldOptions(&models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyIgnore, + ignoreAll := makeFieldOptions(&FieldOptions{ + Strategy: FieldStrategyIgnore, }) - mergeAll := makeFieldOptions(&models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyMerge, + mergeAll := makeFieldOptions(&FieldOptions{ + Strategy: FieldStrategyMerge, }) setOrganised := true type args struct { scene *models.Scene - scraped *models.ScrapedScene - fieldOptions map[string]*models.IdentifyFieldOptionsInput + scraped *scraper.ScrapedScene + fieldOptions map[string]*FieldOptions setOrganized bool } tests := []struct { @@ -380,7 +391,7 @@ func Test_getScenePartial(t *testing.T) { true, }, models.ScenePartial{ - Organized: &setOrganised, + Organized: models.NewOptionalBool(setOrganised), }, }, { @@ -407,7 +418,7 @@ func Test_shouldSetSingleValueField(t *testing.T) { const invalid = "invalid" type args struct { - strategy *models.IdentifyFieldOptionsInput + strategy *FieldOptions hasExistingValue bool } tests := []struct { @@ -418,8 +429,8 @@ func Test_shouldSetSingleValueField(t *testing.T) { { "ignore", args{ - &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyIgnore, + &FieldOptions{ + Strategy: FieldStrategyIgnore, }, false, }, @@ -428,8 +439,8 @@ func Test_shouldSetSingleValueField(t *testing.T) { { "merge existing", args{ - &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyMerge, + &FieldOptions{ + Strategy: FieldStrategyMerge, }, true, }, @@ -438,8 +449,8 @@ func Test_shouldSetSingleValueField(t *testing.T) { { "merge absent", args{ - &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyMerge, + &FieldOptions{ + Strategy: FieldStrategyMerge, }, false, }, @@ -448,8 +459,8 @@ func Test_shouldSetSingleValueField(t *testing.T) { { "overwrite", args{ - &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyOverwrite, + &FieldOptions{ + Strategy: FieldStrategyOverwrite, }, true, }, @@ -458,7 +469,7 @@ func Test_shouldSetSingleValueField(t *testing.T) { { "nil (merge) existing", args{ - 
&models.IdentifyFieldOptionsInput{}, + &FieldOptions{}, true, }, false, @@ -466,7 +477,7 @@ func Test_shouldSetSingleValueField(t *testing.T) { { "nil (merge) absent", args{ - &models.IdentifyFieldOptionsInput{}, + &FieldOptions{}, false, }, true, @@ -474,7 +485,7 @@ func Test_shouldSetSingleValueField(t *testing.T) { { "invalid (merge) existing", args{ - &models.IdentifyFieldOptionsInput{ + &FieldOptions{ Strategy: invalid, }, true, @@ -484,7 +495,7 @@ func Test_shouldSetSingleValueField(t *testing.T) { { "invalid (merge) absent", args{ - &models.IdentifyFieldOptionsInput{ + &FieldOptions{ Strategy: invalid, }, false, diff --git a/internal/identify/options.go b/internal/identify/options.go new file mode 100644 index 000000000..84530e5fc --- /dev/null +++ b/internal/identify/options.go @@ -0,0 +1,92 @@ +package identify + +import ( + "fmt" + "io" + "strconv" + + "github.com/stashapp/stash/pkg/scraper" +) + +type Source struct { + Source *scraper.Source `json:"source"` + // Options defined for a source override the defaults + Options *MetadataOptions `json:"options"` +} + +type Options struct { + // An ordered list of sources to identify items with. Only the first source that finds a match is used. + Sources []*Source `json:"sources"` + // Options defined here override the configured defaults + Options *MetadataOptions `json:"options"` + // scene ids to identify + SceneIDs []string `json:"sceneIDs"` + // paths of scenes to identify - ignored if scene ids are set + Paths []string `json:"paths"` +} + +type MetadataOptions struct { + // any fields missing from here are defaulted to MERGE and createMissing false + FieldOptions []*FieldOptions `json:"fieldOptions"` + // defaults to true if not provided + SetCoverImage *bool `json:"setCoverImage"` + SetOrganized *bool `json:"setOrganized"` + // defaults to true if not provided + IncludeMalePerformers *bool `json:"includeMalePerformers"` +} + +type FieldOptions struct { + Field string `json:"field"` + Strategy FieldStrategy `json:"strategy"` + // creates missing objects if needed - only applicable for performers, tags and studios + CreateMissing *bool `json:"createMissing"` +} + +type FieldStrategy string + +const ( + // Never sets the field value + FieldStrategyIgnore FieldStrategy = "IGNORE" + // For multi-value fields, merge with existing. + // For single-value fields, ignore if already set + FieldStrategyMerge FieldStrategy = "MERGE" + // Always replaces the value if a value is found. + // For multi-value fields, any existing values are removed and replaced with the + // scraped values. 
+ FieldStrategyOverwrite FieldStrategy = "OVERWRITE" +) + +var AllFieldStrategy = []FieldStrategy{ + FieldStrategyIgnore, + FieldStrategyMerge, + FieldStrategyOverwrite, +} + +func (e FieldStrategy) IsValid() bool { + switch e { + case FieldStrategyIgnore, FieldStrategyMerge, FieldStrategyOverwrite: + return true + } + return false +} + +func (e FieldStrategy) String() string { + return string(e) +} + +func (e *FieldStrategy) UnmarshalGQL(v interface{}) error { + str, ok := v.(string) + if !ok { + return fmt.Errorf("enums must be strings") + } + + *e = FieldStrategy(str) + if !e.IsValid() { + return fmt.Errorf("%s is not a valid IdentifyFieldStrategy", str) + } + return nil +} + +func (e FieldStrategy) MarshalGQL(w io.Writer) { + fmt.Fprint(w, strconv.Quote(e.String())) +} diff --git a/internal/identify/performer.go b/internal/identify/performer.go index 495c3eb8e..435524cc4 100644 --- a/internal/identify/performer.go +++ b/internal/identify/performer.go @@ -1,6 +1,7 @@ package identify import ( + "context" "database/sql" "fmt" "strconv" @@ -10,7 +11,12 @@ import ( "github.com/stashapp/stash/pkg/models" ) -func getPerformerID(endpoint string, r models.Repository, p *models.ScrapedPerformer, createMissing bool) (*int, error) { +type PerformerCreator interface { + Create(ctx context.Context, newPerformer models.Performer) (*models.Performer, error) + UpdateStashIDs(ctx context.Context, performerID int, stashIDs []models.StashID) error +} + +func getPerformerID(ctx context.Context, endpoint string, w PerformerCreator, p *models.ScrapedPerformer, createMissing bool) (*int, error) { if p.StoredID != nil { // existing performer, just add it performerID, err := strconv.Atoi(*p.StoredID) @@ -20,20 +26,20 @@ func getPerformerID(endpoint string, r models.Repository, p *models.ScrapedPerfo return &performerID, nil } else if createMissing && p.Name != nil { // name is mandatory - return createMissingPerformer(endpoint, r, p) + return createMissingPerformer(ctx, endpoint, w, p) } return nil, nil } -func createMissingPerformer(endpoint string, r models.Repository, p *models.ScrapedPerformer) (*int, error) { - created, err := r.Performer().Create(scrapedToPerformerInput(p)) +func createMissingPerformer(ctx context.Context, endpoint string, w PerformerCreator, p *models.ScrapedPerformer) (*int, error) { + created, err := w.Create(ctx, scrapedToPerformerInput(p)) if err != nil { return nil, fmt.Errorf("error creating performer: %w", err) } if endpoint != "" && p.RemoteSiteID != nil { - if err := r.Performer().UpdateStashIDs(created.ID, []models.StashID{ + if err := w.UpdateStashIDs(ctx, created.ID, []models.StashID{ { Endpoint: endpoint, StashID: *p.RemoteSiteID, diff --git a/internal/identify/performer_test.go b/internal/identify/performer_test.go index ebe8e49fe..eeed8a1e7 100644 --- a/internal/identify/performer_test.go +++ b/internal/identify/performer_test.go @@ -23,8 +23,8 @@ func Test_getPerformerID(t *testing.T) { validStoredID := 1 name := "name" - repo := mocks.NewTransactionManager() - repo.PerformerMock().On("Create", mock.Anything).Return(&models.Performer{ + mockPerformerReaderWriter := mocks.PerformerReaderWriter{} + mockPerformerReaderWriter.On("Create", testCtx, mock.Anything).Return(&models.Performer{ ID: validStoredID, }, nil) @@ -110,7 +110,7 @@ func Test_getPerformerID(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := getPerformerID(tt.args.endpoint, repo, tt.args.p, tt.args.createMissing) + got, err := getPerformerID(testCtx, 
tt.args.endpoint, &mockPerformerReaderWriter, tt.args.p, tt.args.createMissing) if (err != nil) != tt.wantErr { t.Errorf("getPerformerID() error = %v, wantErr %v", err, tt.wantErr) return @@ -131,23 +131,23 @@ func Test_createMissingPerformer(t *testing.T) { invalidName := "invalidName" performerID := 1 - repo := mocks.NewTransactionManager() - repo.PerformerMock().On("Create", mock.MatchedBy(func(p models.Performer) bool { + mockPerformerReaderWriter := mocks.PerformerReaderWriter{} + mockPerformerReaderWriter.On("Create", testCtx, mock.MatchedBy(func(p models.Performer) bool { return p.Name.String == validName })).Return(&models.Performer{ ID: performerID, }, nil) - repo.PerformerMock().On("Create", mock.MatchedBy(func(p models.Performer) bool { + mockPerformerReaderWriter.On("Create", testCtx, mock.MatchedBy(func(p models.Performer) bool { return p.Name.String == invalidName })).Return(nil, errors.New("error creating performer")) - repo.PerformerMock().On("UpdateStashIDs", performerID, []models.StashID{ + mockPerformerReaderWriter.On("UpdateStashIDs", testCtx, performerID, []models.StashID{ { Endpoint: invalidEndpoint, StashID: remoteSiteID, }, }).Return(errors.New("error updating stash ids")) - repo.PerformerMock().On("UpdateStashIDs", performerID, []models.StashID{ + mockPerformerReaderWriter.On("UpdateStashIDs", testCtx, performerID, []models.StashID{ { Endpoint: validEndpoint, StashID: remoteSiteID, @@ -213,7 +213,7 @@ func Test_createMissingPerformer(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := createMissingPerformer(tt.args.endpoint, repo, tt.args.p) + got, err := createMissingPerformer(testCtx, tt.args.endpoint, &mockPerformerReaderWriter, tt.args.p) if (err != nil) != tt.wantErr { t.Errorf("createMissingPerformer() error = %v, wantErr %v", err, tt.wantErr) return diff --git a/internal/identify/scene.go b/internal/identify/scene.go index 166755451..d74b47d12 100644 --- a/internal/identify/scene.go +++ b/internal/identify/scene.go @@ -9,19 +9,35 @@ import ( "time" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/scene" "github.com/stashapp/stash/pkg/sliceutil" "github.com/stashapp/stash/pkg/sliceutil/intslice" "github.com/stashapp/stash/pkg/utils" ) -type sceneRelationships struct { - repo models.Repository - scene *models.Scene - result *scrapeResult - fieldOptions map[string]*models.IdentifyFieldOptionsInput +type SceneReaderUpdater interface { + GetCover(ctx context.Context, sceneID int) ([]byte, error) + scene.Updater + models.PerformerIDLoader + models.TagIDLoader + models.StashIDLoader } -func (g sceneRelationships) studio() (*int64, error) { +type TagCreator interface { + Create(ctx context.Context, newTag models.Tag) (*models.Tag, error) +} + +type sceneRelationships struct { + sceneReader SceneReaderUpdater + studioCreator StudioCreator + performerCreator PerformerCreator + tagCreator TagCreator + scene *models.Scene + result *scrapeResult + fieldOptions map[string]*FieldOptions +} + +func (g sceneRelationships) studio(ctx context.Context) (*int, error) { existingID := g.scene.StudioID fieldStrategy := g.fieldOptions["studio"] createMissing := fieldStrategy != nil && utils.IsTrue(fieldStrategy.CreateMissing) @@ -29,29 +45,29 @@ func (g sceneRelationships) studio() (*int64, error) { scraped := g.result.result.Studio endpoint := g.result.source.RemoteSite - if scraped == nil || !shouldSetSingleValueField(fieldStrategy, existingID.Valid) { + if scraped == nil || !shouldSetSingleValueField(fieldStrategy, 
existingID != nil) { return nil, nil } if scraped.StoredID != nil { // existing studio, just set it - studioID, err := strconv.ParseInt(*scraped.StoredID, 10, 64) + studioID, err := strconv.Atoi(*scraped.StoredID) if err != nil { return nil, fmt.Errorf("error converting studio ID %s: %w", *scraped.StoredID, err) } // only return value if different to current - if existingID.Int64 != studioID { + if existingID == nil || *existingID != studioID { return &studioID, nil } } else if createMissing { - return createMissingStudio(endpoint, g.repo, scraped) + return createMissingStudio(ctx, endpoint, g.studioCreator, scraped) } return nil, nil } -func (g sceneRelationships) performers(ignoreMale bool) ([]int, error) { +func (g sceneRelationships) performers(ctx context.Context, ignoreMale bool) ([]int, error) { fieldStrategy := g.fieldOptions["performers"] scraped := g.result.result.Performers @@ -61,21 +77,17 @@ func (g sceneRelationships) performers(ignoreMale bool) ([]int, error) { } createMissing := fieldStrategy != nil && utils.IsTrue(fieldStrategy.CreateMissing) - strategy := models.IdentifyFieldStrategyMerge + strategy := FieldStrategyMerge if fieldStrategy != nil { strategy = fieldStrategy.Strategy } - repo := g.repo endpoint := g.result.source.RemoteSite var performerIDs []int - originalPerformerIDs, err := repo.Scene().GetPerformerIDs(g.scene.ID) - if err != nil { - return nil, fmt.Errorf("error getting scene performers: %w", err) - } + originalPerformerIDs := g.scene.PerformerIDs.List() - if strategy == models.IdentifyFieldStrategyMerge { + if strategy == FieldStrategyMerge { // add to existing performerIDs = originalPerformerIDs } @@ -85,7 +97,7 @@ func (g sceneRelationships) performers(ignoreMale bool) ([]int, error) { continue } - performerID, err := getPerformerID(endpoint, repo, p, createMissing) + performerID, err := getPerformerID(ctx, endpoint, g.performerCreator, p, createMissing) if err != nil { return nil, err } @@ -103,11 +115,10 @@ func (g sceneRelationships) performers(ignoreMale bool) ([]int, error) { return performerIDs, nil } -func (g sceneRelationships) tags() ([]int, error) { +func (g sceneRelationships) tags(ctx context.Context) ([]int, error) { fieldStrategy := g.fieldOptions["tags"] scraped := g.result.result.Tags target := g.scene - r := g.repo // just check if ignored if len(scraped) == 0 || !shouldSetSingleValueField(fieldStrategy, false) { @@ -115,18 +126,15 @@ func (g sceneRelationships) tags() ([]int, error) { } createMissing := fieldStrategy != nil && utils.IsTrue(fieldStrategy.CreateMissing) - strategy := models.IdentifyFieldStrategyMerge + strategy := FieldStrategyMerge if fieldStrategy != nil { strategy = fieldStrategy.Strategy } var tagIDs []int - originalTagIDs, err := r.Scene().GetTagIDs(target.ID) - if err != nil { - return nil, fmt.Errorf("error getting scene tags: %w", err) - } + originalTagIDs := target.TagIDs.List() - if strategy == models.IdentifyFieldStrategyMerge { + if strategy == FieldStrategyMerge { // add to existing tagIDs = originalTagIDs } @@ -142,7 +150,7 @@ func (g sceneRelationships) tags() ([]int, error) { tagIDs = intslice.IntAppendUnique(tagIDs, int(tagID)) } else if createMissing { now := time.Now() - created, err := r.Tag().Create(models.Tag{ + created, err := g.tagCreator.Create(ctx, models.Tag{ Name: t.Name, CreatedAt: models.SQLiteTimestamp{Timestamp: now}, UpdatedAt: models.SQLiteTimestamp{Timestamp: now}, @@ -163,11 +171,10 @@ func (g sceneRelationships) tags() ([]int, error) { return tagIDs, nil } -func (g 
sceneRelationships) stashIDs() ([]models.StashID, error) { +func (g sceneRelationships) stashIDs(ctx context.Context) ([]models.StashID, error) { remoteSiteID := g.result.result.RemoteSiteID fieldStrategy := g.fieldOptions["stash_ids"] target := g.scene - r := g.repo endpoint := g.result.source.RemoteSite @@ -176,26 +183,18 @@ func (g sceneRelationships) stashIDs() ([]models.StashID, error) { return nil, nil } - strategy := models.IdentifyFieldStrategyMerge + strategy := FieldStrategyMerge if fieldStrategy != nil { strategy = fieldStrategy.Strategy } - var originalStashIDs []models.StashID var stashIDs []models.StashID - stashIDPtrs, err := r.Scene().GetStashIDs(target.ID) - if err != nil { - return nil, fmt.Errorf("error getting scene tag: %w", err) - } + originalStashIDs := target.StashIDs.List() - // convert existing to non-pointer types - for _, stashID := range stashIDPtrs { - originalStashIDs = append(originalStashIDs, *stashID) - } - - if strategy == models.IdentifyFieldStrategyMerge { + if strategy == FieldStrategyMerge { // add to existing - stashIDs = originalStashIDs + // make a copy so we don't modify the original + stashIDs = append(stashIDs, originalStashIDs...) } for i, stashID := range stashIDs { @@ -227,14 +226,13 @@ func (g sceneRelationships) stashIDs() ([]models.StashID, error) { func (g sceneRelationships) cover(ctx context.Context) ([]byte, error) { scraped := g.result.result.Image - r := g.repo if scraped == nil { return nil, nil } // always overwrite if present - existingCover, err := r.Scene().GetCover(g.scene.ID) + existingCover, err := g.sceneReader.GetCover(ctx, g.scene.ID) if err != nil { return nil, fmt.Errorf("error getting scene cover: %w", err) } diff --git a/internal/identify/scene_test.go b/internal/identify/scene_test.go index f0ba7da17..511023680 100644 --- a/internal/identify/scene_test.go +++ b/internal/identify/scene_test.go @@ -9,36 +9,37 @@ import ( "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models/mocks" + "github.com/stashapp/stash/pkg/scraper" "github.com/stashapp/stash/pkg/utils" "github.com/stretchr/testify/mock" ) func Test_sceneRelationships_studio(t *testing.T) { validStoredID := "1" - var validStoredIDInt int64 = 1 + var validStoredIDInt = 1 invalidStoredID := "invalidStoredID" createMissing := true - defaultOptions := &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyMerge, + defaultOptions := &FieldOptions{ + Strategy: FieldStrategyMerge, } - repo := mocks.NewTransactionManager() - repo.StudioMock().On("Create", mock.Anything).Return(&models.Studio{ + mockStudioReaderWriter := &mocks.StudioReaderWriter{} + mockStudioReaderWriter.On("Create", testCtx, mock.Anything).Return(&models.Studio{ ID: int(validStoredIDInt), }, nil) tr := sceneRelationships{ - repo: repo, - fieldOptions: make(map[string]*models.IdentifyFieldOptionsInput), + studioCreator: mockStudioReaderWriter, + fieldOptions: make(map[string]*FieldOptions), } tests := []struct { name string scene *models.Scene - fieldOptions *models.IdentifyFieldOptionsInput + fieldOptions *FieldOptions result *models.ScrapedStudio - want *int64 + want *int wantErr bool }{ { @@ -52,8 +53,8 @@ func Test_sceneRelationships_studio(t *testing.T) { { "ignore", &models.Scene{}, - &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyIgnore, + &FieldOptions{ + Strategy: FieldStrategyIgnore, }, &models.ScrapedStudio{ StoredID: &validStoredID, @@ -74,7 +75,7 @@ func Test_sceneRelationships_studio(t *testing.T) { { "same stored id", 
&models.Scene{ - StudioID: models.NullInt64(validStoredIDInt), + StudioID: &validStoredIDInt, }, defaultOptions, &models.ScrapedStudio{ @@ -104,8 +105,8 @@ func Test_sceneRelationships_studio(t *testing.T) { { "create missing", &models.Scene{}, - &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyMerge, + &FieldOptions{ + Strategy: FieldStrategyMerge, CreateMissing: &createMissing, }, &models.ScrapedStudio{}, @@ -118,12 +119,12 @@ func Test_sceneRelationships_studio(t *testing.T) { tr.scene = tt.scene tr.fieldOptions["studio"] = tt.fieldOptions tr.result = &scrapeResult{ - result: &models.ScrapedScene{ + result: &scraper.ScrapedScene{ Studio: tt.result, }, } - got, err := tr.studio() + got, err := tr.studio(testCtx) if (err != nil) != tt.wantErr { t.Errorf("sceneRelationships.studio() error = %v, wantErr %v", err, tt.wantErr) return @@ -151,24 +152,33 @@ func Test_sceneRelationships_performers(t *testing.T) { female := models.GenderEnumFemale.String() male := models.GenderEnumMale.String() - defaultOptions := &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyMerge, + defaultOptions := &FieldOptions{ + Strategy: FieldStrategyMerge, } - repo := mocks.NewTransactionManager() - repo.SceneMock().On("GetPerformerIDs", sceneID).Return(nil, nil) - repo.SceneMock().On("GetPerformerIDs", sceneWithPerformerID).Return([]int{existingPerformerID}, nil) - repo.SceneMock().On("GetPerformerIDs", errSceneID).Return(nil, errors.New("error getting IDs")) + emptyScene := &models.Scene{ + ID: sceneID, + PerformerIDs: models.NewRelatedIDs([]int{}), + TagIDs: models.NewRelatedIDs([]int{}), + StashIDs: models.NewRelatedStashIDs([]models.StashID{}), + } + + sceneWithPerformer := &models.Scene{ + ID: sceneWithPerformerID, + PerformerIDs: models.NewRelatedIDs([]int{ + existingPerformerID, + }), + } tr := sceneRelationships{ - repo: repo, - fieldOptions: make(map[string]*models.IdentifyFieldOptionsInput), + sceneReader: &mocks.SceneReaderWriter{}, + fieldOptions: make(map[string]*FieldOptions), } tests := []struct { name string - sceneID int - fieldOptions *models.IdentifyFieldOptionsInput + scene *models.Scene + fieldOptions *FieldOptions scraped []*models.ScrapedPerformer ignoreMale bool want []int @@ -176,9 +186,9 @@ func Test_sceneRelationships_performers(t *testing.T) { }{ { "ignore", - sceneID, - &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyIgnore, + emptyScene, + &FieldOptions{ + Strategy: FieldStrategyIgnore, }, []*models.ScrapedPerformer{ { @@ -191,27 +201,16 @@ func Test_sceneRelationships_performers(t *testing.T) { }, { "none", - sceneID, + emptyScene, defaultOptions, []*models.ScrapedPerformer{}, false, nil, false, }, - { - "error getting ids", - errSceneID, - defaultOptions, - []*models.ScrapedPerformer{ - {}, - }, - false, - nil, - true, - }, { "merge existing", - sceneWithPerformerID, + sceneWithPerformer, defaultOptions, []*models.ScrapedPerformer{ { @@ -225,7 +224,7 @@ func Test_sceneRelationships_performers(t *testing.T) { }, { "merge add", - sceneWithPerformerID, + sceneWithPerformer, defaultOptions, []*models.ScrapedPerformer{ { @@ -239,7 +238,7 @@ func Test_sceneRelationships_performers(t *testing.T) { }, { "ignore male", - sceneID, + emptyScene, defaultOptions, []*models.ScrapedPerformer{ { @@ -254,9 +253,9 @@ func Test_sceneRelationships_performers(t *testing.T) { }, { "overwrite", - sceneWithPerformerID, - &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyOverwrite, + sceneWithPerformer, + 
&FieldOptions{ + Strategy: FieldStrategyOverwrite, }, []*models.ScrapedPerformer{ { @@ -270,9 +269,9 @@ func Test_sceneRelationships_performers(t *testing.T) { }, { "ignore male (not male)", - sceneWithPerformerID, - &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyOverwrite, + sceneWithPerformer, + &FieldOptions{ + Strategy: FieldStrategyOverwrite, }, []*models.ScrapedPerformer{ { @@ -287,9 +286,9 @@ func Test_sceneRelationships_performers(t *testing.T) { }, { "error getting tag ID", - sceneID, - &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyOverwrite, + emptyScene, + &FieldOptions{ + Strategy: FieldStrategyOverwrite, CreateMissing: &createMissing, }, []*models.ScrapedPerformer{ @@ -305,17 +304,15 @@ func Test_sceneRelationships_performers(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tr.scene = &models.Scene{ - ID: tt.sceneID, - } + tr.scene = tt.scene tr.fieldOptions["performers"] = tt.fieldOptions tr.result = &scrapeResult{ - result: &models.ScrapedScene{ + result: &scraper.ScrapedScene{ Performers: tt.scraped, }, } - got, err := tr.performers(tt.ignoreMale) + got, err := tr.performers(testCtx, tt.ignoreMale) if (err != nil) != tt.wantErr { t.Errorf("sceneRelationships.performers() error = %v, wantErr %v", err, tt.wantErr) return @@ -342,42 +339,57 @@ func Test_sceneRelationships_tags(t *testing.T) { validName := "validName" invalidName := "invalidName" - defaultOptions := &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyMerge, + defaultOptions := &FieldOptions{ + Strategy: FieldStrategyMerge, } - repo := mocks.NewTransactionManager() - repo.SceneMock().On("GetTagIDs", sceneID).Return(nil, nil) - repo.SceneMock().On("GetTagIDs", sceneWithTagID).Return([]int{existingID}, nil) - repo.SceneMock().On("GetTagIDs", errSceneID).Return(nil, errors.New("error getting IDs")) + emptyScene := &models.Scene{ + ID: sceneID, + TagIDs: models.NewRelatedIDs([]int{}), + PerformerIDs: models.NewRelatedIDs([]int{}), + StashIDs: models.NewRelatedStashIDs([]models.StashID{}), + } - repo.TagMock().On("Create", mock.MatchedBy(func(p models.Tag) bool { + sceneWithTag := &models.Scene{ + ID: sceneWithTagID, + TagIDs: models.NewRelatedIDs([]int{ + existingID, + }), + PerformerIDs: models.NewRelatedIDs([]int{}), + StashIDs: models.NewRelatedStashIDs([]models.StashID{}), + } + + mockSceneReaderWriter := &mocks.SceneReaderWriter{} + mockTagReaderWriter := &mocks.TagReaderWriter{} + + mockTagReaderWriter.On("Create", testCtx, mock.MatchedBy(func(p models.Tag) bool { return p.Name == validName })).Return(&models.Tag{ ID: validStoredIDInt, }, nil) - repo.TagMock().On("Create", mock.MatchedBy(func(p models.Tag) bool { + mockTagReaderWriter.On("Create", testCtx, mock.MatchedBy(func(p models.Tag) bool { return p.Name == invalidName })).Return(nil, errors.New("error creating tag")) tr := sceneRelationships{ - repo: repo, - fieldOptions: make(map[string]*models.IdentifyFieldOptionsInput), + sceneReader: mockSceneReaderWriter, + tagCreator: mockTagReaderWriter, + fieldOptions: make(map[string]*FieldOptions), } tests := []struct { name string - sceneID int - fieldOptions *models.IdentifyFieldOptionsInput + scene *models.Scene + fieldOptions *FieldOptions scraped []*models.ScrapedTag want []int wantErr bool }{ { "ignore", - sceneID, - &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyIgnore, + emptyScene, + &FieldOptions{ + Strategy: FieldStrategyIgnore, }, []*models.ScrapedTag{ { @@ 
-389,25 +401,15 @@ func Test_sceneRelationships_tags(t *testing.T) { }, { "none", - sceneID, + emptyScene, defaultOptions, []*models.ScrapedTag{}, nil, false, }, - { - "error getting ids", - errSceneID, - defaultOptions, - []*models.ScrapedTag{ - {}, - }, - nil, - true, - }, { "merge existing", - sceneWithTagID, + sceneWithTag, defaultOptions, []*models.ScrapedTag{ { @@ -420,7 +422,7 @@ func Test_sceneRelationships_tags(t *testing.T) { }, { "merge add", - sceneWithTagID, + sceneWithTag, defaultOptions, []*models.ScrapedTag{ { @@ -433,9 +435,9 @@ func Test_sceneRelationships_tags(t *testing.T) { }, { "overwrite", - sceneWithTagID, - &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyOverwrite, + sceneWithTag, + &FieldOptions{ + Strategy: FieldStrategyOverwrite, }, []*models.ScrapedTag{ { @@ -448,9 +450,9 @@ func Test_sceneRelationships_tags(t *testing.T) { }, { "error getting tag ID", - sceneID, - &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyOverwrite, + emptyScene, + &FieldOptions{ + Strategy: FieldStrategyOverwrite, }, []*models.ScrapedTag{ { @@ -463,9 +465,9 @@ func Test_sceneRelationships_tags(t *testing.T) { }, { "create missing", - sceneID, - &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyOverwrite, + emptyScene, + &FieldOptions{ + Strategy: FieldStrategyOverwrite, CreateMissing: &createMissing, }, []*models.ScrapedTag{ @@ -478,9 +480,9 @@ func Test_sceneRelationships_tags(t *testing.T) { }, { "error creating", - sceneID, - &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyOverwrite, + emptyScene, + &FieldOptions{ + Strategy: FieldStrategyOverwrite, CreateMissing: &createMissing, }, []*models.ScrapedTag{ @@ -494,17 +496,15 @@ func Test_sceneRelationships_tags(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tr.scene = &models.Scene{ - ID: tt.sceneID, - } + tr.scene = tt.scene tr.fieldOptions["tags"] = tt.fieldOptions tr.result = &scrapeResult{ - result: &models.ScrapedScene{ + result: &scraper.ScrapedScene{ Tags: tt.scraped, }, } - got, err := tr.tags() + got, err := tr.tags(testCtx) if (err != nil) != tt.wantErr { t.Errorf("sceneRelationships.tags() error = %v, wantErr %v", err, tt.wantErr) return @@ -529,29 +529,35 @@ func Test_sceneRelationships_stashIDs(t *testing.T) { remoteSiteID := "remoteSiteID" newRemoteSiteID := "newRemoteSiteID" - defaultOptions := &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyMerge, + defaultOptions := &FieldOptions{ + Strategy: FieldStrategyMerge, } - repo := mocks.NewTransactionManager() - repo.SceneMock().On("GetStashIDs", sceneID).Return(nil, nil) - repo.SceneMock().On("GetStashIDs", sceneWithStashID).Return([]*models.StashID{ - { - StashID: remoteSiteID, - Endpoint: existingEndpoint, - }, - }, nil) - repo.SceneMock().On("GetStashIDs", errSceneID).Return(nil, errors.New("error getting IDs")) + emptyScene := &models.Scene{ + ID: sceneID, + } + + sceneWithStashIDs := &models.Scene{ + ID: sceneWithStashID, + StashIDs: models.NewRelatedStashIDs([]models.StashID{ + { + StashID: remoteSiteID, + Endpoint: existingEndpoint, + }, + }), + } + + mockSceneReaderWriter := &mocks.SceneReaderWriter{} tr := sceneRelationships{ - repo: repo, - fieldOptions: make(map[string]*models.IdentifyFieldOptionsInput), + sceneReader: mockSceneReaderWriter, + fieldOptions: make(map[string]*FieldOptions), } tests := []struct { name string - sceneID int - fieldOptions *models.IdentifyFieldOptionsInput + scene 
*models.Scene + fieldOptions *FieldOptions endpoint string remoteSiteID *string want []models.StashID @@ -559,9 +565,9 @@ func Test_sceneRelationships_stashIDs(t *testing.T) { }{ { "ignore", - sceneID, - &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyIgnore, + emptyScene, + &FieldOptions{ + Strategy: FieldStrategyIgnore, }, newEndpoint, &remoteSiteID, @@ -570,7 +576,7 @@ func Test_sceneRelationships_stashIDs(t *testing.T) { }, { "no endpoint", - sceneID, + emptyScene, defaultOptions, "", &remoteSiteID, @@ -579,25 +585,16 @@ func Test_sceneRelationships_stashIDs(t *testing.T) { }, { "no site id", - sceneID, + emptyScene, defaultOptions, newEndpoint, nil, nil, false, }, - { - "error getting ids", - errSceneID, - defaultOptions, - newEndpoint, - &remoteSiteID, - nil, - true, - }, { "merge existing", - sceneWithStashID, + sceneWithStashIDs, defaultOptions, existingEndpoint, &remoteSiteID, @@ -606,7 +603,7 @@ func Test_sceneRelationships_stashIDs(t *testing.T) { }, { "merge existing new value", - sceneWithStashID, + sceneWithStashIDs, defaultOptions, existingEndpoint, &newRemoteSiteID, @@ -620,7 +617,7 @@ func Test_sceneRelationships_stashIDs(t *testing.T) { }, { "merge add", - sceneWithStashID, + sceneWithStashIDs, defaultOptions, newEndpoint, &newRemoteSiteID, @@ -638,9 +635,9 @@ func Test_sceneRelationships_stashIDs(t *testing.T) { }, { "overwrite", - sceneWithStashID, - &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyOverwrite, + sceneWithStashIDs, + &FieldOptions{ + Strategy: FieldStrategyOverwrite, }, newEndpoint, &newRemoteSiteID, @@ -654,9 +651,9 @@ func Test_sceneRelationships_stashIDs(t *testing.T) { }, { "overwrite same", - sceneWithStashID, - &models.IdentifyFieldOptionsInput{ - Strategy: models.IdentifyFieldStrategyOverwrite, + sceneWithStashIDs, + &FieldOptions{ + Strategy: FieldStrategyOverwrite, }, existingEndpoint, &remoteSiteID, @@ -666,26 +663,24 @@ func Test_sceneRelationships_stashIDs(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tr.scene = &models.Scene{ - ID: tt.sceneID, - } + tr.scene = tt.scene tr.fieldOptions["stash_ids"] = tt.fieldOptions tr.result = &scrapeResult{ source: ScraperSource{ RemoteSite: tt.endpoint, }, - result: &models.ScrapedScene{ + result: &scraper.ScrapedScene{ RemoteSiteID: tt.remoteSiteID, }, } - got, err := tr.stashIDs() + got, err := tr.stashIDs(testCtx) if (err != nil) != tt.wantErr { t.Errorf("sceneRelationships.stashIDs() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { - t.Errorf("sceneRelationships.stashIDs() = %v, want %v", got, tt.want) + t.Errorf("sceneRelationships.stashIDs() = %+v, want %+v", got, tt.want) } }) } @@ -706,13 +701,13 @@ func Test_sceneRelationships_cover(t *testing.T) { newDataEncoded := base64Prefix + utils.GetBase64StringFromData(newData) invalidData := newDataEncoded + "!!!" 
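The stash-ID table above pins the three field strategies against the scene's existing {endpoint, stash_id} pairs. Reconstructed from the test expectations (not quoted from the stashIDs implementation itself), the intended semantics are roughly:

```go
switch strategy {
case FieldStrategyIgnore:
	return nil, nil // leave the scene's stash IDs untouched
case FieldStrategyMerge:
	// matching endpoint: replace its StashID only if the remote value changed;
	// unknown endpoint: append the new {Endpoint, StashID} pair
case FieldStrategyOverwrite:
	// result is exactly the scraped pair; identical values yield nil
	// (the "overwrite same" case, so no needless update is persisted)
}
```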
- repo := mocks.NewTransactionManager() - repo.SceneMock().On("GetCover", sceneID).Return(existingData, nil) - repo.SceneMock().On("GetCover", errSceneID).Return(nil, errors.New("error getting cover")) + mockSceneReaderWriter := &mocks.SceneReaderWriter{} + mockSceneReaderWriter.On("GetCover", testCtx, sceneID).Return(existingData, nil) + mockSceneReaderWriter.On("GetCover", testCtx, errSceneID).Return(nil, errors.New("error getting cover")) tr := sceneRelationships{ - repo: repo, - fieldOptions: make(map[string]*models.IdentifyFieldOptionsInput), + sceneReader: mockSceneReaderWriter, + fieldOptions: make(map[string]*FieldOptions), } tests := []struct { @@ -764,7 +759,7 @@ func Test_sceneRelationships_cover(t *testing.T) { ID: tt.sceneID, } tr.result = &scrapeResult{ - result: &models.ScrapedScene{ + result: &scraper.ScrapedScene{ Image: tt.image, }, } diff --git a/internal/identify/studio.go b/internal/identify/studio.go index 86cb6b737..135e1a79d 100644 --- a/internal/identify/studio.go +++ b/internal/identify/studio.go @@ -1,6 +1,7 @@ package identify import ( + "context" "database/sql" "fmt" "time" @@ -9,14 +10,19 @@ import ( "github.com/stashapp/stash/pkg/models" ) -func createMissingStudio(endpoint string, repo models.Repository, studio *models.ScrapedStudio) (*int64, error) { - created, err := repo.Studio().Create(scrapedToStudioInput(studio)) +type StudioCreator interface { + Create(ctx context.Context, newStudio models.Studio) (*models.Studio, error) + UpdateStashIDs(ctx context.Context, studioID int, stashIDs []models.StashID) error +} + +func createMissingStudio(ctx context.Context, endpoint string, w StudioCreator, studio *models.ScrapedStudio) (*int, error) { + created, err := w.Create(ctx, scrapedToStudioInput(studio)) if err != nil { return nil, fmt.Errorf("error creating studio: %w", err) } if endpoint != "" && studio.RemoteSiteID != nil { - if err := repo.Studio().UpdateStashIDs(created.ID, []models.StashID{ + if err := w.UpdateStashIDs(ctx, created.ID, []models.StashID{ { Endpoint: endpoint, StashID: *studio.RemoteSiteID, @@ -26,8 +32,7 @@ func createMissingStudio(endpoint string, repo models.Repository, studio *models } } - createdID := int64(created.ID) - return &createdID, nil + return &created.ID, nil } func scrapedToStudioInput(studio *models.ScrapedStudio) models.Studio { diff --git a/internal/identify/studio_test.go b/internal/identify/studio_test.go index 2ba0b840e..172d12df3 100644 --- a/internal/identify/studio_test.go +++ b/internal/identify/studio_test.go @@ -18,25 +18,25 @@ func Test_createMissingStudio(t *testing.T) { validName := "validName" invalidName := "invalidName" createdID := 1 - createdID64 := int64(createdID) - repo := mocks.NewTransactionManager() - repo.StudioMock().On("Create", mock.MatchedBy(func(p models.Studio) bool { + repo := mocks.NewTxnRepository() + mockStudioReaderWriter := repo.Studio.(*mocks.StudioReaderWriter) + mockStudioReaderWriter.On("Create", testCtx, mock.MatchedBy(func(p models.Studio) bool { return p.Name.String == validName })).Return(&models.Studio{ ID: createdID, }, nil) - repo.StudioMock().On("Create", mock.MatchedBy(func(p models.Studio) bool { + mockStudioReaderWriter.On("Create", testCtx, mock.MatchedBy(func(p models.Studio) bool { return p.Name.String == invalidName })).Return(nil, errors.New("error creating performer")) - repo.StudioMock().On("UpdateStashIDs", createdID, []models.StashID{ + mockStudioReaderWriter.On("UpdateStashIDs", testCtx, createdID, []models.StashID{ { Endpoint: invalidEndpoint, StashID: 
remoteSiteID, }, }).Return(errors.New("error updating stash ids")) - repo.StudioMock().On("UpdateStashIDs", createdID, []models.StashID{ + mockStudioReaderWriter.On("UpdateStashIDs", testCtx, createdID, []models.StashID{ { Endpoint: validEndpoint, StashID: remoteSiteID, @@ -50,7 +50,7 @@ func Test_createMissingStudio(t *testing.T) { tests := []struct { name string args args - want *int64 + want *int wantErr bool }{ { @@ -61,7 +61,7 @@ func Test_createMissingStudio(t *testing.T) { Name: validName, }, }, - &createdID64, + &createdID, false, }, { @@ -84,7 +84,7 @@ func Test_createMissingStudio(t *testing.T) { RemoteSiteID: &remoteSiteID, }, }, - &createdID64, + &createdID, false, }, { @@ -102,13 +102,13 @@ func Test_createMissingStudio(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := createMissingStudio(tt.args.endpoint, repo, tt.args.studio) + got, err := createMissingStudio(testCtx, tt.args.endpoint, mockStudioReaderWriter, tt.args.studio) if (err != nil) != tt.wantErr { t.Errorf("createMissingStudio() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { - t.Errorf("createMissingStudio() = %v, want %v", got, tt.want) + t.Errorf("createMissingStudio() = %d, want %d", got, tt.want) } }) } diff --git a/internal/manager/checksum.go b/internal/manager/checksum.go index 469f2c47f..53f368913 100644 --- a/internal/manager/checksum.go +++ b/internal/manager/checksum.go @@ -7,16 +7,21 @@ import ( "github.com/stashapp/stash/internal/manager/config" "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/txn" ) -func setInitialMD5Config(ctx context.Context, txnManager models.TransactionManager) { +type SceneCounter interface { + Count(ctx context.Context) (int, error) +} + +func setInitialMD5Config(ctx context.Context, txnManager txn.Manager, counter SceneCounter) { // if there are no scene files in the database, then default the // VideoFileNamingAlgorithm config setting to oshash and calculateMD5 to // false, otherwise set them to true for backwards compatibility purposes var count int - if err := txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { + if err := txn.WithTxn(ctx, txnManager, func(ctx context.Context) error { var err error - count, err = r.Scene().Count() + count, err = counter.Count(ctx) return err }); err != nil { logger.Errorf("Error while counting scenes: %s", err.Error()) @@ -36,6 +41,11 @@ func setInitialMD5Config(ctx context.Context, txnManager models.TransactionManag } } +type SceneMissingHashCounter interface { + CountMissingChecksum(ctx context.Context) (int, error) + CountMissingOSHash(ctx context.Context) (int, error) +} + // ValidateVideoFileNamingAlgorithm validates changing the // VideoFileNamingAlgorithm configuration flag. // @@ -44,30 +54,27 @@ func setInitialMD5Config(ctx context.Context, txnManager models.TransactionManag // // Likewise, if VideoFileNamingAlgorithm is set to oshash, then this function // will ensure that all oshash values are set on all scenes. 
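One nit in the updated assertion in studio_test.go above: got and tt.want are now *int, and fmt formats pointers (under %d, and under %v as well) as addresses, so a failing test would print unreadable numbers. A small helper keeps the message legible — fmtIntPtr is hypothetical, not from the codebase:

```go
// Requires "strconv"; prints the pointed-to value, guarding nil.
func fmtIntPtr(v *int) string {
	if v == nil {
		return "<nil>"
	}
	return strconv.Itoa(*v)
}

// t.Errorf("createMissingStudio() = %s, want %s", fmtIntPtr(got), fmtIntPtr(tt.want))
```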
-func ValidateVideoFileNamingAlgorithm(txnManager models.TransactionManager, newValue models.HashAlgorithm) error { +func ValidateVideoFileNamingAlgorithm(ctx context.Context, qb SceneMissingHashCounter, newValue models.HashAlgorithm) error { // if algorithm is being set to MD5, then all checksums must be present - return txnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error { - qb := r.Scene() - if newValue == models.HashAlgorithmMd5 { - missingMD5, err := qb.CountMissingChecksum() - if err != nil { - return err - } - - if missingMD5 > 0 { - return errors.New("some checksums are missing on scenes. Run Scan with calculateMD5 set to true") - } - } else if newValue == models.HashAlgorithmOshash { - missingOSHash, err := qb.CountMissingOSHash() - if err != nil { - return err - } - - if missingOSHash > 0 { - return errors.New("some oshash values are missing on scenes. Run Scan to populate") - } + if newValue == models.HashAlgorithmMd5 { + missingMD5, err := qb.CountMissingChecksum(ctx) + if err != nil { + return err } - return nil - }) + if missingMD5 > 0 { + return errors.New("some checksums are missing on scenes. Run Scan with calculateMD5 set to true") + } + } else if newValue == models.HashAlgorithmOshash { + missingOSHash, err := qb.CountMissingOSHash(ctx) + if err != nil { + return err + } + + if missingOSHash > 0 { + return errors.New("some oshash values are missing on scenes. Run Scan to populate") + } + } + + return nil } diff --git a/internal/manager/config/config.go b/internal/manager/config/config.go index 8b3725080..3be7be32a 100644 --- a/internal/manager/config/config.go +++ b/internal/manager/config/config.go @@ -15,6 +15,7 @@ import ( "github.com/spf13/viper" + "github.com/stashapp/stash/internal/identify" "github.com/stashapp/stash/pkg/fsutil" "github.com/stashapp/stash/pkg/hash" "github.com/stashapp/stash/pkg/logger" @@ -147,10 +148,10 @@ const ( // Image lightbox options legacyImageLightboxSlideshowDelay = "slideshow_delay" ImageLightboxSlideshowDelay = "image_lightbox.slideshow_delay" - ImageLightboxDisplayMode = "image_lightbox.display_mode" + ImageLightboxDisplayModeKey = "image_lightbox.display_mode" ImageLightboxScaleUp = "image_lightbox.scale_up" ImageLightboxResetZoomOnNav = "image_lightbox.reset_zoom_on_nav" - ImageLightboxScrollMode = "image_lightbox.scroll_mode" + ImageLightboxScrollModeKey = "image_lightbox.scroll_mode" ImageLightboxScrollAttemptsBeforeChange = "image_lightbox.scroll_attempts_before_change" UI = "ui" @@ -310,8 +311,7 @@ func (i *Instance) GetNotificationsEnabled() bool { // GetShowOneTimeMovedNotification shows whether a small notification to inform the user that Stash // will no longer show a terminal window, and instead will be available in the tray, should be shown. -// -// It is true when an existing system is started after upgrading, and set to false forever after it is shown. +// It is true when an existing system is started after upgrading, and set to false forever after it is shown. 
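With the read transaction hoisted out of ValidateVideoFileNamingAlgorithm, the caller now owns both the transaction and the store. A sketch of the expected call site, using the txn.WithTxn shape shown in checksum.go above (the sceneStore name is illustrative):

```go
err := txn.WithTxn(ctx, txnManager, func(ctx context.Context) error {
	// sceneStore satisfies SceneMissingHashCounter implicitly.
	return ValidateVideoFileNamingAlgorithm(ctx, sceneStore, models.HashAlgorithmOshash)
})
```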
func (i *Instance) GetShowOneTimeMovedNotification() bool { return i.getBool(ShowOneTimeMovedNotification) } @@ -460,14 +460,27 @@ func (i *Instance) getStringMapString(key string) map[string]string { return i.viper(key).GetStringMapString(key) } +type StashConfig struct { + Path string `json:"path"` + ExcludeVideo bool `json:"excludeVideo"` + ExcludeImage bool `json:"excludeImage"` +} + +// Stash configuration details +type StashConfigInput struct { + Path string `json:"path"` + ExcludeVideo bool `json:"excludeVideo"` + ExcludeImage bool `json:"excludeImage"` +} + // GetStathPaths returns the configured stash library paths. // Works opposite to the usual case - it will return the override // value only if the main value is not set. -func (i *Instance) GetStashPaths() []*models.StashConfig { +func (i *Instance) GetStashPaths() []*StashConfig { i.RLock() defer i.RUnlock() - var ret []*models.StashConfig + var ret []*StashConfig v := i.main if !v.IsSet(Stash) { @@ -479,7 +492,7 @@ func (i *Instance) GetStashPaths() []*models.StashConfig { ss := v.GetStringSlice(Stash) ret = nil for _, path := range ss { - toAdd := &models.StashConfig{ + toAdd := &StashConfig{ Path: path, } ret = append(ret, toAdd) @@ -610,8 +623,8 @@ func (i *Instance) GetScraperExcludeTagPatterns() []string { return i.getStringSlice(ScraperExcludeTagPatterns) } -func (i *Instance) GetStashBoxes() models.StashBoxes { - var boxes models.StashBoxes +func (i *Instance) GetStashBoxes() []*models.StashBox { + var boxes []*models.StashBox if err := i.unmarshalKey(StashBoxes, &boxes); err != nil { logger.Warnf("error in unmarshalkey: %v", err) } @@ -797,7 +810,13 @@ func (i *Instance) ValidateCredentials(username string, password string) bool { var stashBoxRe = regexp.MustCompile("^http.*graphql$") -func (i *Instance) ValidateStashBoxes(boxes []*models.StashBoxInput) error { +type StashBoxInput struct { + Endpoint string `json:"endpoint"` + APIKey string `json:"api_key"` + Name string `json:"name"` +} + +func (i *Instance) ValidateStashBoxes(boxes []*StashBoxInput) error { isMulti := len(boxes) > 1 for _, box := range boxes { @@ -933,18 +952,18 @@ func (i *Instance) getSlideshowDelay() int { return ret } -func (i *Instance) GetImageLightboxOptions() models.ConfigImageLightboxResult { +func (i *Instance) GetImageLightboxOptions() ConfigImageLightboxResult { i.RLock() defer i.RUnlock() delay := i.getSlideshowDelay() - ret := models.ConfigImageLightboxResult{ + ret := ConfigImageLightboxResult{ SlideshowDelay: &delay, } - if v := i.viperWith(ImageLightboxDisplayMode); v != nil { - mode := models.ImageLightboxDisplayMode(v.GetString(ImageLightboxDisplayMode)) + if v := i.viperWith(ImageLightboxDisplayModeKey); v != nil { + mode := ImageLightboxDisplayMode(v.GetString(ImageLightboxDisplayModeKey)) ret.DisplayMode = &mode } if v := i.viperWith(ImageLightboxScaleUp); v != nil { @@ -955,8 +974,8 @@ func (i *Instance) GetImageLightboxOptions() models.ConfigImageLightboxResult { value := v.GetBool(ImageLightboxResetZoomOnNav) ret.ResetZoomOnNav = &value } - if v := i.viperWith(ImageLightboxScrollMode); v != nil { - mode := models.ImageLightboxScrollMode(v.GetString(ImageLightboxScrollMode)) + if v := i.viperWith(ImageLightboxScrollModeKey); v != nil { + mode := ImageLightboxScrollMode(v.GetString(ImageLightboxScrollModeKey)) ret.ScrollMode = &mode } if v := i.viperWith(ImageLightboxScrollAttemptsBeforeChange); v != nil { @@ -966,8 +985,8 @@ func (i *Instance) GetImageLightboxOptions() models.ConfigImageLightboxResult { return ret } -func (i 
*Instance) GetDisableDropdownCreate() *models.ConfigDisableDropdownCreate { - return &models.ConfigDisableDropdownCreate{ +func (i *Instance) GetDisableDropdownCreate() *ConfigDisableDropdownCreate { + return &ConfigDisableDropdownCreate{ Performer: i.getBool(DisableDropdownCreatePerformer), Studio: i.getBool(DisableDropdownCreateStudio), Tag: i.getBool(DisableDropdownCreateTag), @@ -1056,13 +1075,13 @@ func (i *Instance) GetDeleteGeneratedDefault() bool { // GetDefaultIdentifySettings returns the default Identify task settings. // Returns nil if the settings could not be unmarshalled, or if it // has not been set. -func (i *Instance) GetDefaultIdentifySettings() *models.IdentifyMetadataTaskOptions { +func (i *Instance) GetDefaultIdentifySettings() *identify.Options { i.RLock() defer i.RUnlock() v := i.viper(DefaultIdentifySettings) if v.IsSet(DefaultIdentifySettings) { - var ret models.IdentifyMetadataTaskOptions + var ret identify.Options if err := v.UnmarshalKey(DefaultIdentifySettings, &ret); err != nil { return nil } @@ -1075,13 +1094,13 @@ func (i *Instance) GetDefaultIdentifySettings() *models.IdentifyMetadataTaskOpti // GetDefaultScanSettings returns the default Scan task settings. // Returns nil if the settings could not be unmarshalled, or if it // has not been set. -func (i *Instance) GetDefaultScanSettings() *models.ScanMetadataOptions { +func (i *Instance) GetDefaultScanSettings() *ScanMetadataOptions { i.RLock() defer i.RUnlock() v := i.viper(DefaultScanSettings) if v.IsSet(DefaultScanSettings) { - var ret models.ScanMetadataOptions + var ret ScanMetadataOptions if err := v.UnmarshalKey(DefaultScanSettings, &ret); err != nil { return nil } @@ -1094,13 +1113,13 @@ func (i *Instance) GetDefaultScanSettings() *models.ScanMetadataOptions { // GetDefaultAutoTagSettings returns the default AutoTag task settings. // Returns nil if the settings could not be unmarshalled, or if it // has not been set.
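Note that GetDefaultIdentifySettings (and its Scan/AutoTag siblings above) deliberately collapses "unset" and "failed to decode" into a nil return, so callers cannot distinguish the two; logging the UnmarshalKey error before returning nil would make misconfigured YAML easier to diagnose. Caller-side sketch (config.GetInstance() is the accessor used elsewhere in this diff):

```go
if opts := config.GetInstance().GetDefaultIdentifySettings(); opts != nil {
	// Use opts as the identify task defaults; nil means no usable stored defaults.
}
```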
-func (i *Instance) GetDefaultAutoTagSettings() *models.AutoTagMetadataOptions { +func (i *Instance) GetDefaultAutoTagSettings() *AutoTagMetadataOptions { i.RLock() defer i.RUnlock() v := i.viper(DefaultAutoTagSettings) if v.IsSet(DefaultAutoTagSettings) { - var ret models.AutoTagMetadataOptions + var ret AutoTagMetadataOptions if err := v.UnmarshalKey(DefaultAutoTagSettings, &ret); err != nil { return nil } diff --git a/internal/manager/config/tasks.go b/internal/manager/config/tasks.go new file mode 100644 index 000000000..2f69c8a50 --- /dev/null +++ b/internal/manager/config/tasks.go @@ -0,0 +1,29 @@ +package config + +type ScanMetadataOptions struct { + // Set name, date, details from metadata (if present) + // Deprecated: not implemented + UseFileMetadata bool `json:"useFileMetadata"` + // Strip file extension from title + // Deprecated: not implemented + StripFileExtension bool `json:"stripFileExtension"` + // Generate previews during scan + ScanGeneratePreviews bool `json:"scanGeneratePreviews"` + // Generate image previews during scan + ScanGenerateImagePreviews bool `json:"scanGenerateImagePreviews"` + // Generate sprites during scan + ScanGenerateSprites bool `json:"scanGenerateSprites"` + // Generate phashes during scan + ScanGeneratePhashes bool `json:"scanGeneratePhashes"` + // Generate image thumbnails during scan + ScanGenerateThumbnails bool `json:"scanGenerateThumbnails"` +} + +type AutoTagMetadataOptions struct { + // IDs of performers to tag files with, or "*" for all + Performers []string `json:"performers"` + // IDs of studios to tag files with, or "*" for all + Studios []string `json:"studios"` + // IDs of tags to tag files with, or "*" for all + Tags []string `json:"tags"` +} diff --git a/internal/manager/config/ui.go b/internal/manager/config/ui.go new file mode 100644 index 000000000..a2744a741 --- /dev/null +++ b/internal/manager/config/ui.go @@ -0,0 +1,106 @@ +package config + +import ( + "fmt" + "io" + "strconv" +) + +type ConfigImageLightboxResult struct { + SlideshowDelay *int `json:"slideshowDelay"` + DisplayMode *ImageLightboxDisplayMode `json:"displayMode"` + ScaleUp *bool `json:"scaleUp"` + ResetZoomOnNav *bool `json:"resetZoomOnNav"` + ScrollMode *ImageLightboxScrollMode `json:"scrollMode"` + ScrollAttemptsBeforeChange int `json:"scrollAttemptsBeforeChange"` +} + +type ImageLightboxDisplayMode string + +const ( + ImageLightboxDisplayModeOriginal ImageLightboxDisplayMode = "ORIGINAL" + ImageLightboxDisplayModeFitXy ImageLightboxDisplayMode = "FIT_XY" + ImageLightboxDisplayModeFitX ImageLightboxDisplayMode = "FIT_X" +) + +var AllImageLightboxDisplayMode = []ImageLightboxDisplayMode{ + ImageLightboxDisplayModeOriginal, + ImageLightboxDisplayModeFitXy, + ImageLightboxDisplayModeFitX, +} + +func (e ImageLightboxDisplayMode) IsValid() bool { + switch e { + case ImageLightboxDisplayModeOriginal, ImageLightboxDisplayModeFitXy, ImageLightboxDisplayModeFitX: + return true + } + return false +} + +func (e ImageLightboxDisplayMode) String() string { + return string(e) +} + +func (e *ImageLightboxDisplayMode) UnmarshalGQL(v interface{}) error { + str, ok := v.(string) + if !ok { + return fmt.Errorf("enums must be strings") + } + + *e = ImageLightboxDisplayMode(str) + if !e.IsValid() { + return fmt.Errorf("%s is not a valid ImageLightboxDisplayMode", str) + } + return nil +} + +func (e ImageLightboxDisplayMode) MarshalGQL(w io.Writer) { + fmt.Fprint(w, strconv.Quote(e.String())) +} + +type ImageLightboxScrollMode string + +const ( + ImageLightboxScrollModeZoom 
ImageLightboxScrollMode = "ZOOM" + ImageLightboxScrollModePanY ImageLightboxScrollMode = "PAN_Y" +) + +var AllImageLightboxScrollMode = []ImageLightboxScrollMode{ + ImageLightboxScrollModeZoom, + ImageLightboxScrollModePanY, +} + +func (e ImageLightboxScrollMode) IsValid() bool { + switch e { + case ImageLightboxScrollModeZoom, ImageLightboxScrollModePanY: + return true + } + return false +} + +func (e ImageLightboxScrollMode) String() string { + return string(e) +} + +func (e *ImageLightboxScrollMode) UnmarshalGQL(v interface{}) error { + str, ok := v.(string) + if !ok { + return fmt.Errorf("enums must be strings") + } + + *e = ImageLightboxScrollMode(str) + if !e.IsValid() { + return fmt.Errorf("%s is not a valid ImageLightboxScrollMode", str) + } + return nil +} + +func (e ImageLightboxScrollMode) MarshalGQL(w io.Writer) { + fmt.Fprint(w, strconv.Quote(e.String())) +} + +type ConfigDisableDropdownCreate struct { + Performer bool `json:"performer"` + Tag bool `json:"tag"` + Studio bool `json:"studio"` +} diff --git a/internal/manager/filename_parser.go b/internal/manager/filename_parser.go index e82cfa1ed..f02f95c73 100644 --- a/internal/manager/filename_parser.go +++ b/internal/manager/filename_parser.go @@ -1,7 +1,7 @@ package manager import ( - "database/sql" + "context" "errors" "path/filepath" "regexp" @@ -16,6 +16,32 @@ import ( "github.com/stashapp/stash/pkg/tag" ) +type SceneParserInput struct { + IgnoreWords []string `json:"ignoreWords"` + WhitespaceCharacters *string `json:"whitespaceCharacters"` + CapitalizeTitle *bool `json:"capitalizeTitle"` + IgnoreOrganized *bool `json:"ignoreOrganized"` +} + +type SceneParserResult struct { + Scene *models.Scene `json:"scene"` + Title *string `json:"title"` + Details *string `json:"details"` + URL *string `json:"url"` + Date *string `json:"date"` + Rating *int `json:"rating"` + StudioID *string `json:"studio_id"` + GalleryIds []string `json:"gallery_ids"` + PerformerIds []string `json:"performer_ids"` + Movies []*SceneMovieID `json:"movies"` + TagIds []string `json:"tag_ids"` +} + +type SceneMovieID struct { + MovieID string `json:"movie_id"` + SceneIndex *string `json:"scene_index"` +} + type parserField struct { field string fieldRegex *regexp.Regexp @@ -211,9 +237,10 @@ type sceneHolder struct { func newSceneHolder(scene *models.Scene) *sceneHolder { sceneCopy := models.Scene{ - ID: scene.ID, - Checksum: scene.Checksum, - Path: scene.Path, + ID: scene.ID, + Files: scene.Files, + // Checksum: scene.Checksum, + // Path: scene.Path, } ret := sceneHolder{ scene: scene, @@ -280,11 +307,9 @@ func (h *sceneHolder) setDate(field *parserField, value string) { // ensure the date is valid // only set if new value is different from the old - if validateDate(fullDate) && h.scene.Date.String != fullDate { - h.result.Date = models.SQLiteDate{ - String: fullDate, - Valid: true, - } + if validateDate(fullDate) && h.scene.Date != nil && h.scene.Date.String() != fullDate { + d := models.NewDate(fullDate) + h.result.Date = &d } } @@ -310,24 +335,17 @@ func (h *sceneHolder) setField(field parserField, value interface{}) { switch field.field { case "title": - h.result.Title = sql.NullString{ - String: value.(string), - Valid: true, - } + v := value.(string) + h.result.Title = v case "date": if validateDate(value.(string)) { - h.result.Date = models.SQLiteDate{ - String: value.(string), - Valid: true, - } + d := models.NewDate(value.(string)) + h.result.Date = &d } case "rating": rating, _ := strconv.Atoi(value.(string)) if validateRating(rating) { - 
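Worth flagging in setDate above: the old condition applied a parsed date whenever it differed from the stored string, but the rewrite also requires h.scene.Date != nil, so a scene with no date can never acquire one from its filename. If the old behavior is the intent, the guard would read:

```go
// Apply the parsed date when there is no stored date, or when it differs.
if validateDate(fullDate) && (h.scene.Date == nil || h.scene.Date.String() != fullDate) {
	d := models.NewDate(fullDate)
	h.result.Date = &d
}
```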
h.result.Rating = sql.NullInt64{ - Int64: int64(rating), - Valid: true, - } + h.result.Rating = &rating } case "performer": // add performer to list @@ -402,7 +420,7 @@ func (m parseMapper) parse(scene *models.Scene) *sceneHolder { type SceneFilenameParser struct { Pattern string - ParserInput models.SceneParserInput + ParserInput SceneParserInput Filter *models.FindFilterType whitespaceRE *regexp.Regexp performerCache map[string]*models.Performer @@ -411,7 +429,7 @@ type SceneFilenameParser struct { tagCache map[string]*models.Tag } -func NewSceneFilenameParser(filter *models.FindFilterType, config models.SceneParserInput) *SceneFilenameParser { +func NewSceneFilenameParser(filter *models.FindFilterType, config SceneParserInput) *SceneFilenameParser { p := &SceneFilenameParser{ Pattern: *filter.Q, ParserInput: config, @@ -444,7 +462,15 @@ func (p *SceneFilenameParser) initWhiteSpaceRegex() { } } -func (p *SceneFilenameParser) Parse(repo models.ReaderRepository) ([]*models.SceneParserResult, int, error) { +type SceneFilenameParserRepository struct { + Scene scene.Queryer + Performer PerformerNamesFinder + Studio studio.Queryer + Movie MovieNameFinder + Tag tag.Queryer +} + +func (p *SceneFilenameParser) Parse(ctx context.Context, repo SceneFilenameParserRepository) ([]*SceneParserResult, int, error) { // perform the query to find the scenes mapper, err := newParseMapper(p.Pattern, p.ParserInput.IgnoreWords) @@ -466,26 +492,26 @@ func (p *SceneFilenameParser) Parse(repo models.ReaderRepository) ([]*models.Sce p.Filter.Q = nil - scenes, total, err := scene.QueryWithCount(repo.Scene(), sceneFilter, p.Filter) + scenes, total, err := scene.QueryWithCount(ctx, repo.Scene, sceneFilter, p.Filter) if err != nil { return nil, 0, err } - ret := p.parseScenes(repo, scenes, mapper) + ret := p.parseScenes(ctx, repo, scenes, mapper) return ret, total, nil } -func (p *SceneFilenameParser) parseScenes(repo models.ReaderRepository, scenes []*models.Scene, mapper *parseMapper) []*models.SceneParserResult { - var ret []*models.SceneParserResult +func (p *SceneFilenameParser) parseScenes(ctx context.Context, repo SceneFilenameParserRepository, scenes []*models.Scene, mapper *parseMapper) []*SceneParserResult { + var ret []*SceneParserResult for _, scene := range scenes { sceneHolder := mapper.parse(scene) if sceneHolder != nil { - r := &models.SceneParserResult{ + r := &SceneParserResult{ Scene: scene, } - p.setParserResult(repo, *sceneHolder, r) + p.setParserResult(ctx, repo, *sceneHolder, r) ret = append(ret, r) } @@ -504,7 +530,11 @@ func (p SceneFilenameParser) replaceWhitespaceCharacters(value string) string { return value } -func (p *SceneFilenameParser) queryPerformer(qb models.PerformerReader, performerName string) *models.Performer { +type PerformerNamesFinder interface { + FindByNames(ctx context.Context, names []string, nocase bool) ([]*models.Performer, error) +} + +func (p *SceneFilenameParser) queryPerformer(ctx context.Context, qb PerformerNamesFinder, performerName string) *models.Performer { // massage the performer name performerName = delimiterRE.ReplaceAllString(performerName, " ") @@ -514,7 +544,7 @@ func (p *SceneFilenameParser) queryPerformer(qb models.PerformerReader, performe } // perform an exact match and grab the first - performers, _ := qb.FindByNames([]string{performerName}, true) + performers, _ := qb.FindByNames(ctx, []string{performerName}, true) var ret *models.Performer if len(performers) > 0 { @@ -527,7 +557,7 @@ func (p *SceneFilenameParser) queryPerformer(qb 
models.PerformerReader, performe return ret } -func (p *SceneFilenameParser) queryStudio(qb models.StudioReader, studioName string) *models.Studio { +func (p *SceneFilenameParser) queryStudio(ctx context.Context, qb studio.Queryer, studioName string) *models.Studio { // massage the performer name studioName = delimiterRE.ReplaceAllString(studioName, " ") @@ -536,11 +566,11 @@ func (p *SceneFilenameParser) queryStudio(qb models.StudioReader, studioName str return ret } - ret, _ := studio.ByName(qb, studioName) + ret, _ := studio.ByName(ctx, qb, studioName) // try to match on alias if ret == nil { - ret, _ = studio.ByAlias(qb, studioName) + ret, _ = studio.ByAlias(ctx, qb, studioName) } // add result to cache @@ -549,7 +579,11 @@ func (p *SceneFilenameParser) queryStudio(qb models.StudioReader, studioName str return ret } -func (p *SceneFilenameParser) queryMovie(qb models.MovieReader, movieName string) *models.Movie { +type MovieNameFinder interface { + FindByName(ctx context.Context, name string, nocase bool) (*models.Movie, error) +} + +func (p *SceneFilenameParser) queryMovie(ctx context.Context, qb MovieNameFinder, movieName string) *models.Movie { // massage the movie name movieName = delimiterRE.ReplaceAllString(movieName, " ") @@ -558,7 +592,7 @@ func (p *SceneFilenameParser) queryMovie(qb models.MovieReader, movieName string return ret } - ret, _ := qb.FindByName(movieName, true) + ret, _ := qb.FindByName(ctx, movieName, true) // add result to cache p.movieCache[movieName] = ret @@ -566,7 +600,7 @@ func (p *SceneFilenameParser) queryMovie(qb models.MovieReader, movieName string return ret } -func (p *SceneFilenameParser) queryTag(qb models.TagReader, tagName string) *models.Tag { +func (p *SceneFilenameParser) queryTag(ctx context.Context, qb tag.Queryer, tagName string) *models.Tag { // massage the tag name tagName = delimiterRE.ReplaceAllString(tagName, " ") @@ -576,11 +610,11 @@ func (p *SceneFilenameParser) queryTag(qb models.TagReader, tagName string) *mod } // match tag name exactly - ret, _ := tag.ByName(qb, tagName) + ret, _ := tag.ByName(ctx, qb, tagName) // try to match on alias if ret == nil { - ret, _ = tag.ByAlias(qb, tagName) + ret, _ = tag.ByAlias(ctx, qb, tagName) } // add result to cache @@ -589,12 +623,12 @@ func (p *SceneFilenameParser) queryTag(qb models.TagReader, tagName string) *mod return ret } -func (p *SceneFilenameParser) setPerformers(qb models.PerformerReader, h sceneHolder, result *models.SceneParserResult) { +func (p *SceneFilenameParser) setPerformers(ctx context.Context, qb PerformerNamesFinder, h sceneHolder, result *SceneParserResult) { // query for each performer performersSet := make(map[int]bool) for _, performerName := range h.performers { if performerName != "" { - performer := p.queryPerformer(qb, performerName) + performer := p.queryPerformer(ctx, qb, performerName) if performer != nil { if _, found := performersSet[performer.ID]; !found { result.PerformerIds = append(result.PerformerIds, strconv.Itoa(performer.ID)) @@ -605,12 +639,12 @@ func (p *SceneFilenameParser) setPerformers(qb models.PerformerReader, h sceneHo } } -func (p *SceneFilenameParser) setTags(qb models.TagReader, h sceneHolder, result *models.SceneParserResult) { +func (p *SceneFilenameParser) setTags(ctx context.Context, qb tag.Queryer, h sceneHolder, result *SceneParserResult) { // query for each performer tagsSet := make(map[int]bool) for _, tagName := range h.tags { if tagName != "" { - tag := p.queryTag(qb, tagName) + tag := p.queryTag(ctx, qb, tagName) if tag != nil { 
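queryPerformer, queryStudio, queryMovie and queryTag all follow one memoized-lookup shape (and the "massage the performer name" comment in the studio and tag variants reads like a copy-paste leftover). Reconstructed from the fragments above, the tag path for instance:

```go
// Misses are cached as nil entries too, so a name that matches nothing
// is only queried once per parse run.
tagName = delimiterRE.ReplaceAllString(tagName, " ")
if ret, found := p.tagCache[tagName]; found {
	return ret
}
ret, _ := tag.ByName(ctx, qb, tagName)
if ret == nil {
	ret, _ = tag.ByAlias(ctx, qb, tagName) // fall back to an alias match
}
p.tagCache[tagName] = ret
return ret
```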
if _, found := tagsSet[tag.ID]; !found { result.TagIds = append(result.TagIds, strconv.Itoa(tag.ID)) @@ -621,10 +655,10 @@ func (p *SceneFilenameParser) setTags(qb models.TagReader, h sceneHolder, result } } -func (p *SceneFilenameParser) setStudio(qb models.StudioReader, h sceneHolder, result *models.SceneParserResult) { +func (p *SceneFilenameParser) setStudio(ctx context.Context, qb studio.Queryer, h sceneHolder, result *SceneParserResult) { // query for each performer if h.studio != "" { - studio := p.queryStudio(qb, h.studio) + studio := p.queryStudio(ctx, qb, h.studio) if studio != nil { studioID := strconv.Itoa(studio.ID) result.StudioID = &studioID @@ -632,15 +666,15 @@ func (p *SceneFilenameParser) setStudio(qb models.StudioReader, h sceneHolder, r } } -func (p *SceneFilenameParser) setMovies(qb models.MovieReader, h sceneHolder, result *models.SceneParserResult) { +func (p *SceneFilenameParser) setMovies(ctx context.Context, qb MovieNameFinder, h sceneHolder, result *SceneParserResult) { // query for each movie moviesSet := make(map[int]bool) for _, movieName := range h.movies { if movieName != "" { - movie := p.queryMovie(qb, movieName) + movie := p.queryMovie(ctx, qb, movieName) if movie != nil { if _, found := moviesSet[movie.ID]; !found { - result.Movies = append(result.Movies, &models.SceneMovieID{ + result.Movies = append(result.Movies, &SceneMovieID{ MovieID: strconv.Itoa(movie.ID), }) moviesSet[movie.ID] = true @@ -650,9 +684,9 @@ func (p *SceneFilenameParser) setMovies(qb models.MovieReader, h sceneHolder, re } } -func (p *SceneFilenameParser) setParserResult(repo models.ReaderRepository, h sceneHolder, result *models.SceneParserResult) { - if h.result.Title.Valid { - title := h.result.Title.String +func (p *SceneFilenameParser) setParserResult(ctx context.Context, repo SceneFilenameParserRepository, h sceneHolder, result *SceneParserResult) { + if h.result.Title != "" { + title := h.result.Title title = p.replaceWhitespaceCharacters(title) if p.ParserInput.CapitalizeTitle != nil && *p.ParserInput.CapitalizeTitle { @@ -662,25 +696,24 @@ func (p *SceneFilenameParser) setParserResult(repo models.ReaderRepository, h sc result.Title = &title } - if h.result.Date.Valid { - result.Date = &h.result.Date.String + if h.result.Date != nil { + dateStr := h.result.Date.String() + result.Date = &dateStr } - if h.result.Rating.Valid { - rating := int(h.result.Rating.Int64) - result.Rating = &rating + if h.result.Rating != nil { + result.Rating = h.result.Rating } if len(h.performers) > 0 { - p.setPerformers(repo.Performer(), h, result) + p.setPerformers(ctx, repo.Performer, h, result) } if len(h.tags) > 0 { - p.setTags(repo.Tag(), h, result) + p.setTags(ctx, repo.Tag, h, result) } - p.setStudio(repo.Studio(), h, result) + p.setStudio(ctx, repo.Studio, h, result) if len(h.movies) > 0 { - p.setMovies(repo.Movie(), h, result) + p.setMovies(ctx, repo.Movie, h, result) } - } diff --git a/internal/manager/fingerprint.go b/internal/manager/fingerprint.go new file mode 100644 index 000000000..16d0eb851 --- /dev/null +++ b/internal/manager/fingerprint.go @@ -0,0 +1,88 @@ +package manager + +import ( + "errors" + "fmt" + "io" + + "github.com/stashapp/stash/internal/manager/config" + "github.com/stashapp/stash/pkg/file" + "github.com/stashapp/stash/pkg/hash/md5" + "github.com/stashapp/stash/pkg/hash/oshash" +) + +type fingerprintCalculator struct { + Config *config.Instance +} + +func (c *fingerprintCalculator) calculateOshash(f *file.BaseFile, o file.Opener) (*file.Fingerprint, error) { + r, 
err := o.Open() + if err != nil { + return nil, fmt.Errorf("opening file: %w", err) + } + + defer r.Close() + + rc, isRC := r.(io.ReadSeeker) + if !isRC { + return nil, errors.New("cannot calculate oshash for non-readcloser") + } + + hash, err := oshash.FromReader(rc, f.Size) + if err != nil { + return nil, fmt.Errorf("calculating oshash: %w", err) + } + + return &file.Fingerprint{ + Type: file.FingerprintTypeOshash, + Fingerprint: hash, + }, nil +} + +func (c *fingerprintCalculator) calculateMD5(o file.Opener) (*file.Fingerprint, error) { + r, err := o.Open() + if err != nil { + return nil, fmt.Errorf("opening file: %w", err) + } + + defer r.Close() + + hash, err := md5.FromReader(r) + if err != nil { + return nil, fmt.Errorf("calculating md5: %w", err) + } + + return &file.Fingerprint{ + Type: file.FingerprintTypeMD5, + Fingerprint: hash, + }, nil +} + +func (c *fingerprintCalculator) CalculateFingerprints(f *file.BaseFile, o file.Opener) ([]file.Fingerprint, error) { + var ret []file.Fingerprint + calculateMD5 := true + + if isVideo(f.Basename) { + // calculate oshash first + fp, err := c.calculateOshash(f, o) + if err != nil { + return nil, err + } + + ret = append(ret, *fp) + + // only calculate MD5 if enabled in config + calculateMD5 = c.Config.IsCalculateMD5() + } + + if calculateMD5 { + fp, err := c.calculateMD5(o) + if err != nil { + return nil, err + } + + ret = append(ret, *fp) + } + + return ret, nil +} diff --git a/internal/manager/gallery.go b/internal/manager/gallery.go deleted file mode 100644 index b7929ee67..000000000 --- a/internal/manager/gallery.go +++ /dev/null @@ -1,17 +0,0 @@ -package manager - -import ( - "os" - - "github.com/stashapp/stash/pkg/logger" - "github.com/stashapp/stash/pkg/models" -) - -func DeleteGalleryFile(gallery *models.Gallery) { - if gallery.Path.Valid { - err := os.Remove(gallery.Path.String) - if err != nil { - logger.Warnf("Could not delete file %s: %s", gallery.Path.String, err.Error()) - } - } -} diff --git a/internal/manager/generator_interactive_heatmap_speed.go b/internal/manager/generator_interactive_heatmap_speed.go index a7da933fc..9155f7b1f 100644 --- a/internal/manager/generator_interactive_heatmap_speed.go +++ b/internal/manager/generator_interactive_heatmap_speed.go @@ -14,7 +14,7 @@ import ( ) type InteractiveHeatmapSpeedGenerator struct { - InteractiveSpeed int64 + InteractiveSpeed int Funscript Script FunscriptPath string HeatmapPath string @@ -175,7 +175,7 @@ func (g *InteractiveHeatmapSpeedGenerator) RenderHeatmap() error { return err } -func (funscript *Script) CalculateMedian() int64 { +func (funscript *Script) CalculateMedian() int { sort.Slice(funscript.Actions, func(i, j int) bool { return funscript.Actions[i].Speed < funscript.Actions[j].Speed }) @@ -183,10 +183,10 @@ func (funscript *Script) CalculateMedian() int64 { mNumber := len(funscript.Actions) / 2 if len(funscript.Actions)%2 != 0 { - return int64(funscript.Actions[mNumber].Speed) + return int(funscript.Actions[mNumber].Speed) } - return int64((funscript.Actions[mNumber-1].Speed + funscript.Actions[mNumber].Speed) / 2) + return int((funscript.Actions[mNumber-1].Speed + funscript.Actions[mNumber].Speed) / 2) } func (gt GradientTable) GetInterpolatedColorFor(t float64) colorful.Color { diff --git a/internal/manager/image.go b/internal/manager/image.go deleted file mode 100644 index c7eb781f6..000000000 --- a/internal/manager/image.go +++ /dev/null @@ -1,59 +0,0 @@ -package manager - -import ( - "archive/zip" - "strings" - - 
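Two notes on fingerprint.go above. First, a naming nit: calculateOshash asserts io.ReadSeeker, yet the variables are rc/isRC and the error says "non-readcloser"; renaming to rs and "non-seekable file" would make failures clearer. Second, a usage sketch (the opener wiring is an assumption based on pkg/file): video files always get an oshash, while MD5 is appended for non-video files, or for video when calculateMD5 is enabled.

```go
calc := &fingerprintCalculator{Config: config.GetInstance()}
fps, err := calc.CalculateFingerprints(f.Base(), opener) // f is a file.File, opener a file.Opener
if err == nil {
	for _, fp := range fps {
		fmt.Println(fp.Type, fp.Fingerprint) // oshash first for video, then md5 if enabled
	}
}
```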
"github.com/stashapp/stash/internal/manager/config" - "github.com/stashapp/stash/pkg/file" - - "github.com/stashapp/stash/pkg/logger" -) - -func walkGalleryZip(path string, walkFunc func(file *zip.File) error) error { - readCloser, err := zip.OpenReader(path) - if err != nil { - return err - } - defer readCloser.Close() - - excludeImgRegex := generateRegexps(config.GetInstance().GetImageExcludes()) - - for _, f := range readCloser.File { - if f.FileInfo().IsDir() { - continue - } - - if strings.Contains(f.Name, "__MACOSX") { - continue - } - - if !isImage(f.Name) { - continue - } - - if matchFileRegex(file.ZipFile(path, f).Path(), excludeImgRegex) { - continue - } - - err := walkFunc(f) - if err != nil { - return err - } - } - - return nil -} - -func countImagesInZip(path string) int { - ret := 0 - err := walkGalleryZip(path, func(file *zip.File) error { - ret++ - return nil - }) - if err != nil { - logger.Warnf("Error while walking gallery zip: %v", err) - } - - return ret -} diff --git a/internal/manager/import.go b/internal/manager/import.go index c2f2820c7..0762096c2 100644 --- a/internal/manager/import.go +++ b/internal/manager/import.go @@ -1,29 +1,74 @@ package manager import ( + "context" "fmt" + "io" + "strconv" "github.com/stashapp/stash/pkg/logger" - "github.com/stashapp/stash/pkg/models" ) -type importer interface { - PreImport() error - PostImport(id int) error - Name() string - FindExistingID() (*int, error) - Create() (*int, error) - Update(id int) error +type ImportDuplicateEnum string + +const ( + ImportDuplicateEnumIgnore ImportDuplicateEnum = "IGNORE" + ImportDuplicateEnumOverwrite ImportDuplicateEnum = "OVERWRITE" + ImportDuplicateEnumFail ImportDuplicateEnum = "FAIL" +) + +var AllImportDuplicateEnum = []ImportDuplicateEnum{ + ImportDuplicateEnumIgnore, + ImportDuplicateEnumOverwrite, + ImportDuplicateEnumFail, } -func performImport(i importer, duplicateBehaviour models.ImportDuplicateEnum) error { - if err := i.PreImport(); err != nil { +func (e ImportDuplicateEnum) IsValid() bool { + switch e { + case ImportDuplicateEnumIgnore, ImportDuplicateEnumOverwrite, ImportDuplicateEnumFail: + return true + } + return false +} + +func (e ImportDuplicateEnum) String() string { + return string(e) +} + +func (e *ImportDuplicateEnum) UnmarshalGQL(v interface{}) error { + str, ok := v.(string) + if !ok { + return fmt.Errorf("enums must be strings") + } + + *e = ImportDuplicateEnum(str) + if !e.IsValid() { + return fmt.Errorf("%s is not a valid ImportDuplicateEnum", str) + } + return nil +} + +func (e ImportDuplicateEnum) MarshalGQL(w io.Writer) { + fmt.Fprint(w, strconv.Quote(e.String())) +} + +type importer interface { + PreImport(ctx context.Context) error + PostImport(ctx context.Context, id int) error + Name() string + FindExistingID(ctx context.Context) (*int, error) + Create(ctx context.Context) (*int, error) + Update(ctx context.Context, id int) error +} + +func performImport(ctx context.Context, i importer, duplicateBehaviour ImportDuplicateEnum) error { + if err := i.PreImport(ctx); err != nil { return err } // try to find an existing object with the same name name := i.Name() - existing, err := i.FindExistingID() + existing, err := i.FindExistingID(ctx) if err != nil { return fmt.Errorf("error finding existing objects: %v", err) } @@ -31,21 +76,21 @@ func performImport(i importer, duplicateBehaviour models.ImportDuplicateEnum) er var id int if existing != nil { - if duplicateBehaviour == models.ImportDuplicateEnumFail { + if duplicateBehaviour == ImportDuplicateEnumFail { 
return fmt.Errorf("existing object with name '%s'", name) - } else if duplicateBehaviour == models.ImportDuplicateEnumIgnore { + } else if duplicateBehaviour == ImportDuplicateEnumIgnore { logger.Info("Skipping existing object") return nil } // must be overwriting id = *existing - if err := i.Update(id); err != nil { + if err := i.Update(ctx, id); err != nil { return fmt.Errorf("error updating existing object: %v", err) } } else { // creating - createdID, err := i.Create() + createdID, err := i.Create(ctx) if err != nil { return fmt.Errorf("error creating object: %v", err) } @@ -53,7 +98,7 @@ func performImport(i importer, duplicateBehaviour models.ImportDuplicateEnum) er id = *createdID } - if err := i.PostImport(id); err != nil { + if err := i.PostImport(ctx, id); err != nil { return err } diff --git a/internal/manager/import_file.go b/internal/manager/import_file.go new file mode 100644 index 000000000..bad9d5bce --- /dev/null +++ b/internal/manager/import_file.go @@ -0,0 +1,255 @@ +package manager + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "time" + + "github.com/stashapp/stash/pkg/file" + "github.com/stashapp/stash/pkg/models/jsonschema" +) + +// HACK: this is all here because of an import loop in jsonschema -> models -> file + +var errZipFileNotExist = errors.New("zip file does not exist") + +type fileFolderImporter struct { + ReaderWriter file.Store + FolderStore file.FolderStore + Input jsonschema.DirEntry + + file file.File + folder *file.Folder +} + +func (i *fileFolderImporter) PreImport(ctx context.Context) error { + var err error + + switch ff := i.Input.(type) { + case *jsonschema.BaseDirEntry: + i.folder, err = i.folderJSONToFolder(ctx, ff) + default: + i.file, err = i.fileJSONToFile(ctx, i.Input) + } + + return err +} + +func (i *fileFolderImporter) folderJSONToFolder(ctx context.Context, baseJSON *jsonschema.BaseDirEntry) (*file.Folder, error) { + ret := file.Folder{ + DirEntry: file.DirEntry{ + ModTime: baseJSON.ModTime.GetTime(), + }, + Path: baseJSON.Path, + CreatedAt: baseJSON.CreatedAt.GetTime(), + UpdatedAt: baseJSON.CreatedAt.GetTime(), + } + + if err := i.populateZipFileID(ctx, &ret.DirEntry); err != nil { + return nil, err + } + + // set parent folder id during the creation process + + return &ret, nil +} + +func (i *fileFolderImporter) fileJSONToFile(ctx context.Context, fileJSON jsonschema.DirEntry) (file.File, error) { + switch ff := fileJSON.(type) { + case *jsonschema.VideoFile: + baseFile, err := i.baseFileJSONToBaseFile(ctx, ff.BaseFile) + if err != nil { + return nil, err + } + return &file.VideoFile{ + BaseFile: baseFile, + Format: ff.Format, + Width: ff.Width, + Height: ff.Height, + Duration: ff.Duration, + VideoCodec: ff.VideoCodec, + AudioCodec: ff.AudioCodec, + FrameRate: ff.FrameRate, + BitRate: ff.BitRate, + Interactive: ff.Interactive, + InteractiveSpeed: ff.InteractiveSpeed, + }, nil + case *jsonschema.ImageFile: + baseFile, err := i.baseFileJSONToBaseFile(ctx, ff.BaseFile) + if err != nil { + return nil, err + } + return &file.ImageFile{ + BaseFile: baseFile, + Format: ff.Format, + Width: ff.Width, + Height: ff.Height, + }, nil + case *jsonschema.BaseFile: + return i.baseFileJSONToBaseFile(ctx, ff) + } + + return nil, fmt.Errorf("unknown file type") +} + +func (i *fileFolderImporter) baseFileJSONToBaseFile(ctx context.Context, baseJSON *jsonschema.BaseFile) (*file.BaseFile, error) { + baseFile := file.BaseFile{ + DirEntry: file.DirEntry{ + ModTime: baseJSON.ModTime.GetTime(), + }, + Basename: filepath.Base(baseJSON.Path), + 
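Stepping back to performImport above: the flow is unchanged apart from context threading, and the duplicate policy picks one of three outcomes when FindExistingID matches. Usage sketch (tagImporter stands in for any importer implementation):

```go
// FAIL → error out; IGNORE → skip silently; OVERWRITE → Update in place.
if err := performImport(ctx, tagImporter, ImportDuplicateEnumOverwrite); err != nil {
	return fmt.Errorf("importing tag: %w", err)
}
```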
Size: baseJSON.Size, + CreatedAt: baseJSON.CreatedAt.GetTime(), + UpdatedAt: baseJSON.CreatedAt.GetTime(), + } + + for _, fp := range baseJSON.Fingerprints { + baseFile.Fingerprints = append(baseFile.Fingerprints, file.Fingerprint{ + Type: fp.Type, + Fingerprint: fp.Fingerprint, + }) + } + + if err := i.populateZipFileID(ctx, &baseFile.DirEntry); err != nil { + return nil, err + } + + return &baseFile, nil +} + +func (i *fileFolderImporter) populateZipFileID(ctx context.Context, f *file.DirEntry) error { + zipFilePath := i.Input.DirEntry().ZipFile + if zipFilePath != "" { + zf, err := i.ReaderWriter.FindByPath(ctx, zipFilePath) + if err != nil { + return fmt.Errorf("error finding file by path %q: %v", zipFilePath, err) + } + + if zf == nil { + return errZipFileNotExist + } + + id := zf.Base().ID + f.ZipFileID = &id + } + + return nil +} + +func (i *fileFolderImporter) PostImport(ctx context.Context, id int) error { + return nil +} + +func (i *fileFolderImporter) Name() string { + return i.Input.DirEntry().Path +} + +func (i *fileFolderImporter) FindExistingID(ctx context.Context) (*int, error) { + path := i.Input.DirEntry().Path + existing, err := i.ReaderWriter.FindByPath(ctx, path) + if err != nil { + return nil, err + } + + if existing != nil { + id := int(existing.Base().ID) + return &id, nil + } + + return nil, nil +} + +func (i *fileFolderImporter) createFolderHierarchy(ctx context.Context, p string) (*file.Folder, error) { + parentPath := filepath.Dir(p) + + if parentPath == p { + // get or create this folder + return i.getOrCreateFolder(ctx, p, nil) + } + + parent, err := i.createFolderHierarchy(ctx, parentPath) + if err != nil { + return nil, err + } + + return i.getOrCreateFolder(ctx, p, parent) +} + +func (i *fileFolderImporter) getOrCreateFolder(ctx context.Context, path string, parent *file.Folder) (*file.Folder, error) { + folder, err := i.FolderStore.FindByPath(ctx, path) + if err != nil { + return nil, err + } + + if folder != nil { + return folder, nil + } + + now := time.Now() + + folder = &file.Folder{ + Path: path, + CreatedAt: now, + UpdatedAt: now, + } + + if parent != nil { + folder.ZipFileID = parent.ZipFileID + folder.ParentFolderID = &parent.ID + } + + if err := i.FolderStore.Create(ctx, folder); err != nil { + return nil, err + } + + return folder, nil +} + +func (i *fileFolderImporter) Create(ctx context.Context) (*int, error) { + // create folder hierarchy and set parent folder id + path := i.Input.DirEntry().Path + path = filepath.Dir(path) + folder, err := i.createFolderHierarchy(ctx, path) + if err != nil { + return nil, fmt.Errorf("creating folder hierarchy for %q: %w", path, err) + } + + if i.folder != nil { + return i.createFolder(ctx, folder) + } + + return i.createFile(ctx, folder) +} + +func (i *fileFolderImporter) createFile(ctx context.Context, parentFolder *file.Folder) (*int, error) { + if parentFolder != nil { + i.file.Base().ParentFolderID = parentFolder.ID + } + + if err := i.ReaderWriter.Create(ctx, i.file); err != nil { + return nil, fmt.Errorf("error creating file: %w", err) + } + + id := int(i.file.Base().ID) + return &id, nil +} + +func (i *fileFolderImporter) createFolder(ctx context.Context, parentFolder *file.Folder) (*int, error) { + if parentFolder != nil { + i.folder.ParentFolderID = &parentFolder.ID + } + + if err := i.FolderStore.Create(ctx, i.folder); err != nil { + return nil, fmt.Errorf("error creating folder: %w", err) + } + + id := int(i.folder.ID) + return &id, nil +} + +func (i *fileFolderImporter) Update(ctx context.Context, 
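The recursion in createFolderHierarchy above terminates because filepath.Dir reaches a fixed point at the root: once Dir(p) == p, the function gets or creates that folder and unwinds. Quick illustration:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	fmt.Println(filepath.Dir("/a/b/c")) // /a/b
	fmt.Println(filepath.Dir("/a"))     // /
	fmt.Println(filepath.Dir("/"))      // / — parentPath == p, the base case
}
```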
id int) error { + // update not supported + return nil +} diff --git a/internal/manager/json_utils.go b/internal/manager/json_utils.go index 9a3330a61..a2cb61b36 100644 --- a/internal/manager/json_utils.go +++ b/internal/manager/json_utils.go @@ -1,6 +1,8 @@ package manager import ( + "path/filepath" + "github.com/stashapp/stash/pkg/models/jsonschema" "github.com/stashapp/stash/pkg/models/paths" ) @@ -9,14 +11,6 @@ type jsonUtils struct { json paths.JSONPaths } -func (jp *jsonUtils) getMappings() (*jsonschema.Mappings, error) { - return jsonschema.LoadMappingsFile(jp.json.MappingsFile) -} - -func (jp *jsonUtils) saveMappings(mappings *jsonschema.Mappings) error { - return jsonschema.SaveMappingsFile(jp.json.MappingsFile, mappings) -} - func (jp *jsonUtils) getScraped() ([]jsonschema.ScrapedItem, error) { return jsonschema.LoadScrapedFile(jp.json.ScrapedFile) } @@ -25,58 +19,34 @@ func (jp *jsonUtils) saveScaped(scraped []jsonschema.ScrapedItem) error { return jsonschema.SaveScrapedFile(jp.json.ScrapedFile, scraped) } -func (jp *jsonUtils) getPerformer(checksum string) (*jsonschema.Performer, error) { - return jsonschema.LoadPerformerFile(jp.json.PerformerJSONPath(checksum)) +func (jp *jsonUtils) savePerformer(fn string, performer *jsonschema.Performer) error { + return jsonschema.SavePerformerFile(filepath.Join(jp.json.Performers, fn), performer) } -func (jp *jsonUtils) savePerformer(checksum string, performer *jsonschema.Performer) error { - return jsonschema.SavePerformerFile(jp.json.PerformerJSONPath(checksum), performer) +func (jp *jsonUtils) saveStudio(fn string, studio *jsonschema.Studio) error { + return jsonschema.SaveStudioFile(filepath.Join(jp.json.Studios, fn), studio) } -func (jp *jsonUtils) getStudio(checksum string) (*jsonschema.Studio, error) { - return jsonschema.LoadStudioFile(jp.json.StudioJSONPath(checksum)) +func (jp *jsonUtils) saveTag(fn string, tag *jsonschema.Tag) error { + return jsonschema.SaveTagFile(filepath.Join(jp.json.Tags, fn), tag) } -func (jp *jsonUtils) saveStudio(checksum string, studio *jsonschema.Studio) error { - return jsonschema.SaveStudioFile(jp.json.StudioJSONPath(checksum), studio) +func (jp *jsonUtils) saveMovie(fn string, movie *jsonschema.Movie) error { + return jsonschema.SaveMovieFile(filepath.Join(jp.json.Movies, fn), movie) } -func (jp *jsonUtils) getTag(checksum string) (*jsonschema.Tag, error) { - return jsonschema.LoadTagFile(jp.json.TagJSONPath(checksum)) +func (jp *jsonUtils) saveScene(fn string, scene *jsonschema.Scene) error { + return jsonschema.SaveSceneFile(filepath.Join(jp.json.Scenes, fn), scene) } -func (jp *jsonUtils) saveTag(checksum string, tag *jsonschema.Tag) error { - return jsonschema.SaveTagFile(jp.json.TagJSONPath(checksum), tag) +func (jp *jsonUtils) saveImage(fn string, image *jsonschema.Image) error { + return jsonschema.SaveImageFile(filepath.Join(jp.json.Images, fn), image) } -func (jp *jsonUtils) getMovie(checksum string) (*jsonschema.Movie, error) { - return jsonschema.LoadMovieFile(jp.json.MovieJSONPath(checksum)) +func (jp *jsonUtils) saveGallery(fn string, gallery *jsonschema.Gallery) error { + return jsonschema.SaveGalleryFile(filepath.Join(jp.json.Galleries, fn), gallery) } -func (jp *jsonUtils) saveMovie(checksum string, movie *jsonschema.Movie) error { - return jsonschema.SaveMovieFile(jp.json.MovieJSONPath(checksum), movie) -} - -func (jp *jsonUtils) getScene(checksum string) (*jsonschema.Scene, error) { - return jsonschema.LoadSceneFile(jp.json.SceneJSONPath(checksum)) -} - -func (jp *jsonUtils) 
saveScene(checksum string, scene *jsonschema.Scene) error { - return jsonschema.SaveSceneFile(jp.json.SceneJSONPath(checksum), scene) -} - -func (jp *jsonUtils) getImage(checksum string) (*jsonschema.Image, error) { - return jsonschema.LoadImageFile(jp.json.ImageJSONPath(checksum)) -} - -func (jp *jsonUtils) saveImage(checksum string, image *jsonschema.Image) error { - return jsonschema.SaveImageFile(jp.json.ImageJSONPath(checksum), image) -} - -func (jp *jsonUtils) getGallery(checksum string) (*jsonschema.Gallery, error) { - return jsonschema.LoadGalleryFile(jp.json.GalleryJSONPath(checksum)) -} - -func (jp *jsonUtils) saveGallery(checksum string, gallery *jsonschema.Gallery) error { - return jsonschema.SaveGalleryFile(jp.json.GalleryJSONPath(checksum), gallery) +func (jp *jsonUtils) saveFile(fn string, file jsonschema.DirEntry) error { + return jsonschema.SaveFileFile(filepath.Join(jp.json.Files, fn), file) } diff --git a/internal/manager/manager.go b/internal/manager/manager.go index ae7655d54..7260abb28 100644 --- a/internal/manager/manager.go +++ b/internal/manager/manager.go @@ -4,9 +4,11 @@ import ( "context" "errors" "fmt" + "io" "os" "path/filepath" "runtime/pprof" + "strconv" "strings" "sync" "time" @@ -15,21 +17,91 @@ import ( "github.com/stashapp/stash/internal/dlna" "github.com/stashapp/stash/internal/log" "github.com/stashapp/stash/internal/manager/config" - "github.com/stashapp/stash/pkg/database" "github.com/stashapp/stash/pkg/ffmpeg" + "github.com/stashapp/stash/pkg/file" + file_image "github.com/stashapp/stash/pkg/file/image" + "github.com/stashapp/stash/pkg/file/video" "github.com/stashapp/stash/pkg/fsutil" + "github.com/stashapp/stash/pkg/gallery" + "github.com/stashapp/stash/pkg/image" "github.com/stashapp/stash/pkg/job" "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models/paths" "github.com/stashapp/stash/pkg/plugin" + "github.com/stashapp/stash/pkg/scene" + "github.com/stashapp/stash/pkg/scene/generate" "github.com/stashapp/stash/pkg/scraper" "github.com/stashapp/stash/pkg/session" "github.com/stashapp/stash/pkg/sqlite" "github.com/stashapp/stash/pkg/utils" "github.com/stashapp/stash/ui" + + // register custom migrations + _ "github.com/stashapp/stash/pkg/sqlite/migrations" ) +type SystemStatus struct { + DatabaseSchema *int `json:"databaseSchema"` + DatabasePath *string `json:"databasePath"` + ConfigPath *string `json:"configPath"` + AppSchema int `json:"appSchema"` + Status SystemStatusEnum `json:"status"` +} + +type SystemStatusEnum string + +const ( + SystemStatusEnumSetup SystemStatusEnum = "SETUP" + SystemStatusEnumNeedsMigration SystemStatusEnum = "NEEDS_MIGRATION" + SystemStatusEnumOk SystemStatusEnum = "OK" +) + +var AllSystemStatusEnum = []SystemStatusEnum{ + SystemStatusEnumSetup, + SystemStatusEnumNeedsMigration, + SystemStatusEnumOk, +} + +func (e SystemStatusEnum) IsValid() bool { + switch e { + case SystemStatusEnumSetup, SystemStatusEnumNeedsMigration, SystemStatusEnumOk: + return true + } + return false +} + +func (e SystemStatusEnum) String() string { + return string(e) +} + +func (e *SystemStatusEnum) UnmarshalGQL(v interface{}) error { + str, ok := v.(string) + if !ok { + return fmt.Errorf("enums must be strings") + } + + *e = SystemStatusEnum(str) + if !e.IsValid() { + return fmt.Errorf("%s is not a valid SystemStatusEnum", str) + } + return nil +} + +func (e SystemStatusEnum) MarshalGQL(w io.Writer) { + fmt.Fprint(w, strconv.Quote(e.String())) +} + +type SetupInput struct { + // 
Empty to indicate $HOME/.stash/config.yml default + ConfigLocation string `json:"configLocation"` + Stashes []*config.StashConfigInput `json:"stashes"` + // Empty to indicate default + DatabaseFile string `json:"databaseFile"` + // Empty to indicate default + GeneratedLocation string `json:"generatedLocation"` +} + type Manager struct { Config *config.Instance Logger *log.Logger @@ -52,7 +124,15 @@ type Manager struct { DLNAService *dlna.Service - TxnManager models.TransactionManager + Database *sqlite.Database + Repository Repository + + SceneService SceneService + ImageService ImageService + GalleryService GalleryService + + Scanner *file.Scanner + Cleaner *file.Cleaner scanSubs *subscriptionManager } @@ -87,6 +167,8 @@ func initialize() error { l := initLog() initProfiling(cfg.GetCPUProfilePath()) + db := sqlite.NewDatabase() + instance = &Manager{ Config: cfg, Logger: l, @@ -94,17 +176,46 @@ func initialize() error { DownloadStore: NewDownloadStore(), PluginCache: plugin.NewCache(cfg), - TxnManager: sqlite.NewTransactionManager(), + Database: db, + Repository: sqliteRepository(db), scanSubs: &subscriptionManager{}, } + instance.SceneService = &scene.Service{ + File: db.File, + Repository: db.Scene, + MarkerDestroyer: instance.Repository.SceneMarker, + } + + instance.ImageService = &image.Service{ + File: db.File, + Repository: db.Image, + } + + instance.GalleryService = &gallery.Service{ + Repository: db.Gallery, + ImageFinder: db.Image, + ImageService: instance.ImageService, + File: db.File, + Folder: db.Folder, + } + instance.JobManager = initJobManager() sceneServer := SceneServer{ - TXNManager: instance.TxnManager, + TxnManager: instance.Repository, + SceneCoverGetter: instance.Repository.Scene, } - instance.DLNAService = dlna.NewService(instance.TxnManager, instance.Config, &sceneServer) + + instance.DLNAService = dlna.NewService(instance.Repository, dlna.Repository{ + SceneFinder: instance.Repository.Scene, + FileFinder: instance.Repository.File, + StudioFinder: instance.Repository.Studio, + TagFinder: instance.Repository.Tag, + PerformerFinder: instance.Repository.Performer, + MovieFinder: instance.Repository.Movie, + }, instance.Config, &sceneServer) if !cfg.IsNewSystem() { logger.Infof("using config file: %s", cfg.GetConfigFile()) @@ -115,8 +226,15 @@ func initialize() error { if err != nil { return fmt.Errorf("error initializing configuration: %w", err) - } else if err := instance.PostInit(ctx); err != nil { - return err + } + + if err := instance.PostInit(ctx); err != nil { + var migrationNeededErr *sqlite.MigrationNeededError + if errors.As(err, &migrationNeededErr) { + logger.Warn(err.Error()) + } else { + return err + } } initSecurity(cfg) @@ -137,6 +255,9 @@ func initialize() error { logger.Warnf("could not initialize FFMPEG subsystem: %v", err) } + instance.Scanner = makeScanner(db, instance.PluginCache) + instance.Cleaner = makeCleaner(db, instance.PluginCache) + // if DLNA is enabled, start it now if instance.Config.GetDLNADefaultEnabled() { if err := instance.DLNAService.Start(nil); err != nil { @@ -147,6 +268,71 @@ func initialize() error { return nil } +func videoFileFilter(ctx context.Context, f file.File) bool { + return isVideo(f.Base().Basename) +} + +func imageFileFilter(ctx context.Context, f file.File) bool { + return isImage(f.Base().Basename) +} + +func galleryFileFilter(ctx context.Context, f file.File) bool { + return isZip(f.Base().Basename) +} + +type coverGenerator struct { +} + +func (g *coverGenerator) GenerateCover(ctx context.Context, scene 
*models.Scene, f *file.VideoFile) error { + gg := generate.Generator{ + Encoder: instance.FFMPEG, + LockManager: instance.ReadLockManager, + ScenePaths: instance.Paths.Scene, + } + + return gg.Screenshot(ctx, f.Path, scene.GetHash(instance.Config.GetVideoFileNamingAlgorithm()), f.Width, f.Duration, generate.ScreenshotOptions{}) +} + +func makeScanner(db *sqlite.Database, pluginCache *plugin.Cache) *file.Scanner { + return &file.Scanner{ + Repository: file.Repository{ + Manager: db, + DatabaseProvider: db, + Store: db.File, + FolderStore: db.Folder, + }, + FileDecorators: []file.Decorator{ + &file.FilteredDecorator{ + Decorator: &video.Decorator{ + FFProbe: instance.FFProbe, + }, + Filter: file.FilterFunc(videoFileFilter), + }, + &file.FilteredDecorator{ + Decorator: &file_image.Decorator{}, + Filter: file.FilterFunc(imageFileFilter), + }, + }, + FingerprintCalculator: &fingerprintCalculator{instance.Config}, + FS: &file.OsFS{}, + } +} + +func makeCleaner(db *sqlite.Database, pluginCache *plugin.Cache) *file.Cleaner { + return &file.Cleaner{ + FS: &file.OsFS{}, + Repository: file.Repository{ + Manager: db, + DatabaseProvider: db, + Store: db.File, + FolderStore: db.Folder, + }, + Handlers: []file.CleanHandler{ + &cleanHandler{}, + }, + } +} + func initJobManager() *job.Manager { ret := job.NewManager() @@ -279,8 +465,12 @@ func (s *Manager) PostInit(ctx context.Context) error { if err := fsutil.EmptyDir(instance.Paths.Generated.Downloads); err != nil { logger.Warnf("could not empty Downloads directory: %v", err) } - if err := fsutil.EmptyDir(instance.Paths.Generated.Tmp); err != nil { - logger.Warnf("could not empty Tmp directory: %v", err) + if err := fsutil.EnsureDir(instance.Paths.Generated.Tmp); err != nil { + logger.Warnf("could not create Tmp directory: %v", err) + } else { + if err := fsutil.EmptyDir(instance.Paths.Generated.Tmp); err != nil { + logger.Warnf("could not empty Tmp directory: %v", err) + } } }, deleteTimeout, func(done chan struct{}) { logger.Info("Please wait. Deleting temporary files...") // print @@ -289,7 +479,8 @@ func (s *Manager) PostInit(ctx context.Context) error { }) } - if err := database.Initialize(s.Config.GetDatabasePath()); err != nil { + database := s.Database + if err := database.Open(s.Config.GetDatabasePath()); err != nil { return err } @@ -314,7 +505,14 @@ func writeStashIcon() { // initScraperCache initializes a new scraper cache and returns it. 
func (s *Manager) initScraperCache() *scraper.Cache { - ret, err := scraper.NewCache(config.GetInstance(), s.TxnManager) + ret, err := scraper.NewCache(config.GetInstance(), s.Repository, scraper.Repository{ + SceneFinder: s.Repository.Scene, + GalleryFinder: s.Repository.Gallery, + TagFinder: s.Repository.Tag, + PerformerFinder: s.Repository.Performer, + MovieFinder: s.Repository.Movie, + StudioFinder: s.Repository.Studio, + }) if err != nil { logger.Errorf("Error reading scraper configs: %s", err.Error()) @@ -354,7 +552,7 @@ func (s *Manager) RefreshScraperCache() { s.ScraperCache = s.initScraperCache() } -func setSetupDefaults(input *models.SetupInput) { +func setSetupDefaults(input *SetupInput) { if input.ConfigLocation == "" { input.ConfigLocation = filepath.Join(fsutil.GetHomeDirectory(), ".stash", "config.yml") } @@ -369,7 +567,7 @@ func setSetupDefaults(input *models.SetupInput) { } } -func (s *Manager) Setup(ctx context.Context, input models.SetupInput) error { +func (s *Manager) Setup(ctx context.Context, input SetupInput) error { setSetupDefaults(&input) c := s.Config @@ -413,7 +611,12 @@ func (s *Manager) Setup(ctx context.Context, input models.SetupInput) error { // initialise the database if err := s.PostInit(ctx); err != nil { - return fmt.Errorf("error initializing the database: %v", err) + var migrationNeededErr *sqlite.MigrationNeededError + if errors.As(err, &migrationNeededErr) { + logger.Warn(err.Error()) + } else { + return fmt.Errorf("error initializing the database: %v", err) + } } s.Config.FinalizeSetup() @@ -422,6 +625,8 @@ func (s *Manager) Setup(ctx context.Context, input models.SetupInput) error { return fmt.Errorf("error initializing FFMPEG subsystem: %v", err) } + instance.Scanner = makeScanner(instance.Database, instance.PluginCache) + return nil } @@ -433,7 +638,13 @@ func (s *Manager) validateFFMPEG() error { return nil } -func (s *Manager) Migrate(ctx context.Context, input models.MigrateInput) error { +type MigrateInput struct { + BackupPath string `json:"backupPath"` +} + +func (s *Manager) Migrate(ctx context.Context, input MigrateInput) error { + database := s.Database + // always backup so that we can roll back to the previous version if // migration fails backupPath := input.BackupPath @@ -442,7 +653,7 @@ func (s *Manager) Migrate(ctx context.Context, input models.MigrateInput) error } // perform database backup - if err := database.Backup(database.DB, backupPath); err != nil { + if err := database.Backup(backupPath); err != nil { return fmt.Errorf("error backing up database: %s", err) } @@ -473,20 +684,21 @@ func (s *Manager) Migrate(ctx context.Context, input models.MigrateInput) error return nil } -func (s *Manager) GetSystemStatus() *models.SystemStatus { - status := models.SystemStatusEnumOk +func (s *Manager) GetSystemStatus() *SystemStatus { + database := s.Database + status := SystemStatusEnumOk dbSchema := int(database.Version()) dbPath := database.DatabasePath() appSchema := int(database.AppSchemaVersion()) configFile := s.Config.GetConfigFile() if s.Config.IsNewSystem() { - status = models.SystemStatusEnumSetup + status = SystemStatusEnumSetup } else if dbSchema < appSchema { - status = models.SystemStatusEnumNeedsMigration + status = SystemStatusEnumNeedsMigration } - return &models.SystemStatus{ + return &SystemStatus{ DatabaseSchema: &dbSchema, DatabasePath: &dbPath, AppSchema: appSchema, @@ -502,7 +714,7 @@ func (s *Manager) Shutdown(code int) { // TODO: Each part of the manager needs to gracefully stop at some point // for now, we 
just close the database. - err := database.Close() + err := s.Database.Close() if err != nil { logger.Errorf("Error closing database: %s", err) if code == 0 { diff --git a/internal/manager/manager_tasks.go b/internal/manager/manager_tasks.go index 209e89ceb..33354073d 100644 --- a/internal/manager/manager_tasks.go +++ b/internal/manager/manager_tasks.go @@ -6,24 +6,20 @@ import ( "fmt" "strconv" "sync" + "time" "github.com/stashapp/stash/internal/manager/config" "github.com/stashapp/stash/pkg/fsutil" "github.com/stashapp/stash/pkg/job" "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" - "github.com/stashapp/stash/pkg/scene" ) -func isGallery(pathname string) bool { +func isZip(pathname string) bool { gExt := config.GetInstance().GetGalleryExtensions() return fsutil.MatchExtension(pathname, gExt) } -func isCaptions(pathname string) bool { - return fsutil.MatchExtension(pathname, scene.CaptionExts) -} - func isVideo(pathname string) bool { vidExt := config.GetInstance().GetVideoExtensions() return fsutil.MatchExtension(pathname, vidExt) @@ -34,14 +30,16 @@ func isImage(pathname string) bool { return fsutil.MatchExtension(pathname, imgExt) } -func getScanPaths(inputPaths []string) []*models.StashConfig { +func getScanPaths(inputPaths []string) []*config.StashConfig { + stashPaths := config.GetInstance().GetStashPaths() + if len(inputPaths) == 0 { - return config.GetInstance().GetStashPaths() + return stashPaths } - var ret []*models.StashConfig + var ret []*config.StashConfig for _, p := range inputPaths { - s := getStashFromDirPath(p) + s := getStashFromDirPath(stashPaths, p) if s == nil { logger.Warnf("%s is not in the configured stash paths", p) continue @@ -62,13 +60,28 @@ func (s *Manager) ScanSubscribe(ctx context.Context) <-chan bool { return s.scanSubs.subscribe(ctx) } -func (s *Manager) Scan(ctx context.Context, input models.ScanMetadataInput) (int, error) { +type ScanMetadataInput struct { + Paths []string `json:"paths"` + + config.ScanMetadataOptions `mapstructure:",squash"` + + // Filter options for the scan + Filter *ScanMetaDataFilterInput `json:"filter"` +} + +// Filter options for metadata scanning +type ScanMetaDataFilterInput struct { + // If set, files with a modification time before this time point are ignored by the scan + MinModTime *time.Time `json:"minModTime"` +} + +func (s *Manager) Scan(ctx context.Context, input ScanMetadataInput) (int, error) { if err := s.validateFFMPEG(); err != nil { return 0, err } scanJob := ScanJob{ - txnManager: s.TxnManager, + scanner: s.Scanner, input: input, subscriptions: s.scanSubs, } @@ -85,10 +98,10 @@ func (s *Manager) Import(ctx context.Context) (int, error) { j := job.MakeJobExec(func(ctx context.Context, progress *job.Progress) { task := ImportTask{ - txnManager: s.TxnManager, + txnManager: s.Repository, BaseDir: metadataPath, Reset: true, - DuplicateBehaviour: models.ImportDuplicateEnumFail, + DuplicateBehaviour: ImportDuplicateEnumFail, MissingRefBehaviour: models.ImportMissingRefEnumFail, fileNamingAlgorithm: config.GetVideoFileNamingAlgorithm(), } @@ -109,7 +122,7 @@ func (s *Manager) Export(ctx context.Context) (int, error) { var wg sync.WaitGroup wg.Add(1) task := ExportTask{ - txnManager: s.TxnManager, + txnManager: s.Repository, full: true, fileNamingAlgorithm: config.GetVideoFileNamingAlgorithm(), } @@ -131,7 +144,7 @@ func (s *Manager) RunSingleTask(ctx context.Context, t Task) int { return s.JobManager.Add(ctx, t.GetDescription(), j) } -func (s *Manager) Generate(ctx context.Context, input
models.GenerateMetadataInput) (int, error) { +func (s *Manager) Generate(ctx context.Context, input GenerateMetadataInput) (int, error) { if err := s.validateFFMPEG(); err != nil { return 0, err } @@ -140,7 +153,7 @@ func (s *Manager) Generate(ctx context.Context, input models.GenerateMetadataInp } j := &GenerateJob{ - txnManager: s.TxnManager, + txnManager: s.Repository, input: input, } @@ -169,9 +182,12 @@ func (s *Manager) generateScreenshot(ctx context.Context, sceneId string, at *fl } var scene *models.Scene - if err := s.TxnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { + if err := s.Repository.WithTxn(ctx, func(ctx context.Context) error { var err error - scene, err = r.Scene().Find(sceneIdInt) + scene, err = s.Repository.Scene.Find(ctx, sceneIdInt) + if scene != nil { + err = scene.LoadPrimaryFile(ctx, s.Repository.File) + } return err }); err != nil || scene == nil { logger.Errorf("failed to get scene for generate: %s", err.Error()) @@ -179,7 +195,7 @@ func (s *Manager) generateScreenshot(ctx context.Context, sceneId string, at *fl } task := GenerateScreenshotTask{ - txnManager: s.TxnManager, + txnManager: s.Repository, Scene: *scene, ScreenshotAt: at, fileNamingAlgorithm: config.GetInstance().GetVideoFileNamingAlgorithm(), @@ -193,20 +209,40 @@ func (s *Manager) generateScreenshot(ctx context.Context, sceneId string, at *fl return s.JobManager.Add(ctx, fmt.Sprintf("Generating screenshot for scene id %s", sceneId), j) } -func (s *Manager) AutoTag(ctx context.Context, input models.AutoTagMetadataInput) int { +type AutoTagMetadataInput struct { + // Paths to tag, null for all files + Paths []string `json:"paths"` + // IDs of performers to tag files with, or "*" for all + Performers []string `json:"performers"` + // IDs of studios to tag files with, or "*" for all + Studios []string `json:"studios"` + // IDs of tags to tag files with, or "*" for all + Tags []string `json:"tags"` +} + +func (s *Manager) AutoTag(ctx context.Context, input AutoTagMetadataInput) int { j := autoTagJob{ - txnManager: s.TxnManager, + txnManager: s.Repository, input: input, } return s.JobManager.Add(ctx, "Auto-tagging...", &j) } -func (s *Manager) Clean(ctx context.Context, input models.CleanMetadataInput) int { +type CleanMetadataInput struct { + Paths []string `json:"paths"` + // Do a dry run. 
Don't delete any files + DryRun bool `json:"dryRun"` +} + +func (s *Manager) Clean(ctx context.Context, input CleanMetadataInput) int { j := cleanJob{ - txnManager: s.TxnManager, - input: input, - scanSubs: s.scanSubs, + cleaner: s.Cleaner, + txnManager: s.Repository, + sceneService: s.SceneService, + imageService: s.ImageService, + input: input, + scanSubs: s.scanSubs, } return s.JobManager.Add(ctx, "Cleaning...", &j) @@ -218,9 +254,9 @@ func (s *Manager) MigrateHash(ctx context.Context) int { logger.Infof("Migrating generated files for %s naming hash", fileNamingAlgo.String()) var scenes []*models.Scene - if err := s.TxnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { + if err := s.Repository.WithTxn(ctx, func(ctx context.Context) error { var err error - scenes, err = r.Scene().All() + scenes, err = s.Repository.Scene.All(ctx) return err }); err != nil { logger.Errorf("failed to fetch list of scenes for migration: %s", err.Error()) @@ -260,7 +296,21 @@ func (s *Manager) MigrateHash(ctx context.Context) int { return s.JobManager.Add(ctx, "Migrating scene hashes...", j) } -func (s *Manager) StashBoxBatchPerformerTag(ctx context.Context, input models.StashBoxBatchPerformerTagInput) int { +// If neither performer_ids nor performer_names are set, tag all performers +type StashBoxBatchPerformerTagInput struct { + // Stash endpoint to use for the performer tagging + Endpoint int `json:"endpoint"` + // Fields to exclude when executing the performer tagging + ExcludeFields []string `json:"exclude_fields"` + // Refresh performers already tagged by StashBox if true. Only tag performers with no StashBox tagging if false + Refresh bool `json:"refresh"` + // If set, only tag these performer ids + PerformerIds []string `json:"performer_ids"` + // If set, only tag these performer names + PerformerNames []string `json:"performer_names"` +} + +func (s *Manager) StashBoxBatchPerformerTag(ctx context.Context, input StashBoxBatchPerformerTagInput) int { j := job.MakeJobExec(func(ctx context.Context, progress *job.Progress) { logger.Infof("Initiating stash-box batch performer tag") @@ -280,15 +330,14 @@ func (s *Manager) StashBoxBatchPerformerTag(ctx context.Context, input models.St // This is why we mark this section nolint. In principle, we should look to // rewrite the section at some point, to avoid the linter warning. if len(input.PerformerIds) > 0 { //nolint:gocritic - if err := s.TxnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - performerQuery := r.Performer() + if err := s.Repository.WithTxn(ctx, func(ctx context.Context) error { + performerQuery := s.Repository.Performer for _, performerID := range input.PerformerIds { if id, err := strconv.Atoi(performerID); err == nil { - performer, err := performerQuery.Find(id) + performer, err := performerQuery.Find(ctx, id) if err == nil { tasks = append(tasks, StashBoxPerformerTagTask{ - txnManager: s.TxnManager, performer: performer, refresh: input.Refresh, box: box, @@ -307,7 +356,6 @@ func (s *Manager) StashBoxBatchPerformerTag(ctx context.Context, input models.St for i := range input.PerformerNames { if len(input.PerformerNames[i]) > 0 { tasks = append(tasks, StashBoxPerformerTagTask{ - txnManager: s.TxnManager, name: &input.PerformerNames[i], refresh: input.Refresh, box: box, @@ -320,14 +368,14 @@ func (s *Manager) StashBoxBatchPerformerTag(ctx context.Context, input models.St // However, this doesn't really help with readability of the current section. Mark it // as nolint for now. 
In the future we'd like to rewrite this code by factoring some of // this into separate functions. - if err := s.TxnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - performerQuery := r.Performer() + if err := s.Repository.WithTxn(ctx, func(ctx context.Context) error { + performerQuery := s.Repository.Performer var performers []*models.Performer var err error if input.Refresh { - performers, err = performerQuery.FindByStashIDStatus(true, box.Endpoint) + performers, err = performerQuery.FindByStashIDStatus(ctx, true, box.Endpoint) } else { - performers, err = performerQuery.FindByStashIDStatus(false, box.Endpoint) + performers, err = performerQuery.FindByStashIDStatus(ctx, false, box.Endpoint) } if err != nil { return fmt.Errorf("error querying performers: %v", err) @@ -335,7 +383,6 @@ func (s *Manager) StashBoxBatchPerformerTag(ctx context.Context, input models.St for _, performer := range performers { tasks = append(tasks, StashBoxPerformerTagTask{ - txnManager: s.TxnManager, performer: performer, refresh: input.Refresh, box: box, diff --git a/internal/manager/post_migrate.go b/internal/manager/post_migrate.go index 1db1aac40..acc93ae69 100644 --- a/internal/manager/post_migrate.go +++ b/internal/manager/post_migrate.go @@ -4,5 +4,5 @@ import "context" // PostMigrate is executed after migrations have been executed. func (s *Manager) PostMigrate(ctx context.Context) { - setInitialMD5Config(ctx, s.TxnManager) + setInitialMD5Config(ctx, s.Repository, s.Repository.Scene) } diff --git a/internal/manager/repository.go b/internal/manager/repository.go new file mode 100644 index 000000000..ea3eb04e0 --- /dev/null +++ b/internal/manager/repository.go @@ -0,0 +1,100 @@ +package manager + +import ( + "context" + + "github.com/stashapp/stash/pkg/file" + "github.com/stashapp/stash/pkg/gallery" + "github.com/stashapp/stash/pkg/image" + "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/scene" + "github.com/stashapp/stash/pkg/sqlite" + "github.com/stashapp/stash/pkg/txn" +) + +type ImageReaderWriter interface { + models.ImageReaderWriter + image.FinderCreatorUpdater + models.ImageFileLoader + GetManyFileIDs(ctx context.Context, ids []int) ([][]file.ID, error) +} + +type GalleryReaderWriter interface { + models.GalleryReaderWriter + gallery.FinderCreatorUpdater + gallery.Finder + models.FileLoader + GetManyFileIDs(ctx context.Context, ids []int) ([][]file.ID, error) +} + +type SceneReaderWriter interface { + models.SceneReaderWriter + scene.CreatorUpdater + GetManyFileIDs(ctx context.Context, ids []int) ([][]file.ID, error) +} + +type FileReaderWriter interface { + file.Store + file.Finder + Query(ctx context.Context, options models.FileQueryOptions) (*models.FileQueryResult, error) + GetCaptions(ctx context.Context, fileID file.ID) ([]*models.VideoCaption, error) +} + +type FolderReaderWriter interface { + file.FolderStore + Find(ctx context.Context, id file.FolderID) (*file.Folder, error) +} + +type Repository struct { + models.TxnManager + + File FileReaderWriter + Folder FolderReaderWriter + Gallery GalleryReaderWriter + Image ImageReaderWriter + Movie models.MovieReaderWriter + Performer models.PerformerReaderWriter + Scene SceneReaderWriter + SceneMarker models.SceneMarkerReaderWriter + ScrapedItem models.ScrapedItemReaderWriter + Studio models.StudioReaderWriter + Tag models.TagReaderWriter + SavedFilter models.SavedFilterReaderWriter +} + +func (r *Repository) WithTxn(ctx context.Context, fn txn.TxnFunc) error { + return txn.WithTxn(ctx, r, fn) +} + +func 
sqliteRepository(d *sqlite.Database) Repository { + txnRepo := d.TxnRepository() + + return Repository{ + TxnManager: txnRepo, + File: d.File, + Folder: d.Folder, + Gallery: d.Gallery, + Image: d.Image, + Movie: txnRepo.Movie, + Performer: txnRepo.Performer, + Scene: d.Scene, + SceneMarker: txnRepo.SceneMarker, + ScrapedItem: txnRepo.ScrapedItem, + Studio: txnRepo.Studio, + Tag: txnRepo.Tag, + SavedFilter: txnRepo.SavedFilter, + } +} + +type SceneService interface { + Destroy(ctx context.Context, scene *models.Scene, fileDeleter *scene.FileDeleter, deleteGenerated, deleteFile bool) error +} + +type ImageService interface { + Destroy(ctx context.Context, image *models.Image, fileDeleter *image.FileDeleter, deleteGenerated, deleteFile bool) error + DestroyZipImages(ctx context.Context, zipFile file.File, fileDeleter *image.FileDeleter, deleteGenerated bool) ([]*models.Image, error) +} + +type GalleryService interface { + Destroy(ctx context.Context, i *models.Gallery, fileDeleter *image.FileDeleter, deleteGenerated, deleteFile bool) ([]*models.Image, error) +} diff --git a/internal/manager/running_streams.go b/internal/manager/running_streams.go index 41c196462..9d43d26d2 100644 --- a/internal/manager/running_streams.go +++ b/internal/manager/running_streams.go @@ -8,6 +8,7 @@ import ( "github.com/stashapp/stash/pkg/fsutil" "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/txn" "github.com/stashapp/stash/pkg/utils" ) @@ -49,8 +50,13 @@ func KillRunningStreams(scene *models.Scene, fileNamingAlgo models.HashAlgorithm instance.ReadLockManager.Cancel(transcodePath) } +type SceneCoverGetter interface { + GetCover(ctx context.Context, sceneID int) ([]byte, error) +} + type SceneServer struct { - TXNManager models.TransactionManager + TxnManager txn.Manager + SceneCoverGetter SceneCoverGetter } func (s *SceneServer) StreamSceneDirect(scene *models.Scene, w http.ResponseWriter, r *http.Request) { @@ -75,8 +81,8 @@ func (s *SceneServer) ServeScreenshot(scene *models.Scene, w http.ResponseWriter http.ServeFile(w, r, filepath) } else { var cover []byte - err := s.TXNManager.WithReadTxn(r.Context(), func(repo models.ReaderRepository) error { - cover, _ = repo.Scene().GetCover(scene.ID) + err := txn.WithTxn(r.Context(), s.TxnManager, func(ctx context.Context) error { + cover, _ = s.SceneCoverGetter.GetCover(ctx, scene.ID) return nil }) if err != nil { diff --git a/internal/manager/scene.go b/internal/manager/scene.go index e0aee54ea..f9693787b 100644 --- a/internal/manager/scene.go +++ b/internal/manager/scene.go @@ -5,35 +5,37 @@ import ( "github.com/stashapp/stash/internal/manager/config" "github.com/stashapp/stash/pkg/ffmpeg" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/fsutil" "github.com/stashapp/stash/pkg/models" ) -func GetSceneFileContainer(scene *models.Scene) (ffmpeg.Container, error) { +func GetVideoFileContainer(file *file.VideoFile) (ffmpeg.Container, error) { var container ffmpeg.Container - if scene.Format.Valid { - container = ffmpeg.Container(scene.Format.String) + format := file.Format + if format != "" { + container = ffmpeg.Container(format) } else { // container isn't in the DB // shouldn't happen, fallback to ffprobe ffprobe := GetInstance().FFProbe - tmpVideoFile, err := ffprobe.NewVideoFile(scene.Path) + tmpVideoFile, err := ffprobe.NewVideoFile(file.Path) if err != nil { return ffmpeg.Container(""), fmt.Errorf("error reading video file: %v", err) } - return 
ffmpeg.MatchContainer(tmpVideoFile.Container, scene.Path) + return ffmpeg.MatchContainer(tmpVideoFile.Container, file.Path) } return container, nil } -func includeSceneStreamPath(scene *models.Scene, streamingResolution models.StreamingResolutionEnum, maxStreamingTranscodeSize models.StreamingResolutionEnum) bool { +func includeSceneStreamPath(f *file.VideoFile, streamingResolution models.StreamingResolutionEnum, maxStreamingTranscodeSize models.StreamingResolutionEnum) bool { // convert StreamingResolutionEnum to ResolutionEnum so we can get the min // resolution convertedRes := models.ResolutionEnum(streamingResolution) - minResolution := int64(convertedRes.GetMinResolution()) - sceneResolution := scene.GetMinResolution() + minResolution := convertedRes.GetMinResolution() + sceneResolution := f.GetMinResolution() // don't include if scene resolution is smaller than the streamingResolution if sceneResolution != 0 && sceneResolution < minResolution { @@ -47,23 +49,34 @@ func includeSceneStreamPath(scene *models.Scene, streamingResolution models.Stre // convert StreamingResolutionEnum to ResolutionEnum maxStreamingResolution := models.ResolutionEnum(maxStreamingTranscodeSize) - return int64(maxStreamingResolution.GetMinResolution()) >= minResolution + return maxStreamingResolution.GetMinResolution() >= minResolution } -func makeStreamEndpoint(streamURL string, streamingResolution models.StreamingResolutionEnum, mimeType, label string) *models.SceneStreamEndpoint { - return &models.SceneStreamEndpoint{ +type SceneStreamEndpoint struct { + URL string `json:"url"` + MimeType *string `json:"mime_type"` + Label *string `json:"label"` +} + +func makeStreamEndpoint(streamURL string, streamingResolution models.StreamingResolutionEnum, mimeType, label string) *SceneStreamEndpoint { + return &SceneStreamEndpoint{ URL: fmt.Sprintf("%s?resolution=%s", streamURL, streamingResolution.String()), MimeType: &mimeType, Label: &label, } } -func GetSceneStreamPaths(scene *models.Scene, directStreamURL string, maxStreamingTranscodeSize models.StreamingResolutionEnum) ([]*models.SceneStreamEndpoint, error) { +func GetSceneStreamPaths(scene *models.Scene, directStreamURL string, maxStreamingTranscodeSize models.StreamingResolutionEnum) ([]*SceneStreamEndpoint, error) { if scene == nil { return nil, fmt.Errorf("nil scene") } - var ret []*models.SceneStreamEndpoint + pf := scene.Files.Primary() + if pf == nil { + return nil, fmt.Errorf("nil file") + } + + var ret []*SceneStreamEndpoint mimeWebm := ffmpeg.MimeWebm mimeHLS := ffmpeg.MimeHLS mimeMp4 := ffmpeg.MimeMp4 @@ -73,16 +86,16 @@ func GetSceneStreamPaths(scene *models.Scene, directStreamURL string, maxStreami // direct stream should only apply when the audio codec is supported audioCodec := ffmpeg.MissingUnsupported - if scene.AudioCodec.Valid { - audioCodec = ffmpeg.ProbeAudioCodec(scene.AudioCodec.String) + if pf.AudioCodec != "" { + audioCodec = ffmpeg.ProbeAudioCodec(pf.AudioCodec) } // don't care if we can't get the container - container, _ := GetSceneFileContainer(scene) + container, _ := GetVideoFileContainer(pf) if HasTranscode(scene, config.GetInstance().GetVideoFileNamingAlgorithm()) || ffmpeg.IsValidAudioForContainer(audioCodec, container) { label := "Direct stream" - ret = append(ret, &models.SceneStreamEndpoint{ + ret = append(ret, &SceneStreamEndpoint{ URL: directStreamURL, MimeType: &mimeMp4, Label: &label, @@ -92,7 +105,7 @@ func GetSceneStreamPaths(scene *models.Scene, directStreamURL string, maxStreami // only add mkv stream endpoint if the 
scene container is an mkv already if container == ffmpeg.Matroska { label := "mkv" - ret = append(ret, &models.SceneStreamEndpoint{ + ret = append(ret, &SceneStreamEndpoint{ URL: directStreamURL + ".mkv", // set mkv to mp4 to trick the client, since many clients won't try mkv MimeType: &mimeMp4, @@ -115,33 +128,33 @@ func GetSceneStreamPaths(scene *models.Scene, directStreamURL string, maxStreami mp4LabelStandard := "MP4 Standard (480p)" // "STANDARD" mp4LabelLow := "MP4 Low (240p)" // "LOW" - var webmStreams []*models.SceneStreamEndpoint - var mp4Streams []*models.SceneStreamEndpoint + var webmStreams []*SceneStreamEndpoint + var mp4Streams []*SceneStreamEndpoint webmURL := directStreamURL + ".webm" mp4URL := directStreamURL + ".mp4" - if includeSceneStreamPath(scene, models.StreamingResolutionEnumFourK, maxStreamingTranscodeSize) { + if includeSceneStreamPath(pf, models.StreamingResolutionEnumFourK, maxStreamingTranscodeSize) { webmStreams = append(webmStreams, makeStreamEndpoint(webmURL, models.StreamingResolutionEnumFourK, mimeMp4, webmLabelFourK)) mp4Streams = append(mp4Streams, makeStreamEndpoint(mp4URL, models.StreamingResolutionEnumFourK, mimeMp4, mp4LabelFourK)) } - if includeSceneStreamPath(scene, models.StreamingResolutionEnumFullHd, maxStreamingTranscodeSize) { + if includeSceneStreamPath(pf, models.StreamingResolutionEnumFullHd, maxStreamingTranscodeSize) { webmStreams = append(webmStreams, makeStreamEndpoint(webmURL, models.StreamingResolutionEnumFullHd, mimeMp4, webmLabelFullHD)) mp4Streams = append(mp4Streams, makeStreamEndpoint(mp4URL, models.StreamingResolutionEnumFullHd, mimeMp4, mp4LabelFullHD)) } - if includeSceneStreamPath(scene, models.StreamingResolutionEnumStandardHd, maxStreamingTranscodeSize) { + if includeSceneStreamPath(pf, models.StreamingResolutionEnumStandardHd, maxStreamingTranscodeSize) { webmStreams = append(webmStreams, makeStreamEndpoint(webmURL, models.StreamingResolutionEnumStandardHd, mimeMp4, webmLabelStandardHD)) mp4Streams = append(mp4Streams, makeStreamEndpoint(mp4URL, models.StreamingResolutionEnumStandardHd, mimeMp4, mp4LabelStandardHD)) } - if includeSceneStreamPath(scene, models.StreamingResolutionEnumStandard, maxStreamingTranscodeSize) { + if includeSceneStreamPath(pf, models.StreamingResolutionEnumStandard, maxStreamingTranscodeSize) { webmStreams = append(webmStreams, makeStreamEndpoint(webmURL, models.StreamingResolutionEnumStandard, mimeMp4, webmLabelStandard)) mp4Streams = append(mp4Streams, makeStreamEndpoint(mp4URL, models.StreamingResolutionEnumStandard, mimeMp4, mp4LabelStandard)) } - if includeSceneStreamPath(scene, models.StreamingResolutionEnumLow, maxStreamingTranscodeSize) { + if includeSceneStreamPath(pf, models.StreamingResolutionEnumLow, maxStreamingTranscodeSize) { webmStreams = append(webmStreams, makeStreamEndpoint(webmURL, models.StreamingResolutionEnumLow, mimeMp4, webmLabelLow)) mp4Streams = append(mp4Streams, makeStreamEndpoint(mp4URL, models.StreamingResolutionEnumLow, mimeMp4, mp4LabelLow)) } @@ -149,7 +162,7 @@ func GetSceneStreamPaths(scene *models.Scene, directStreamURL string, maxStreami ret = append(ret, webmStreams...) ret = append(ret, mp4Streams...) - defaultStreams := []*models.SceneStreamEndpoint{ + defaultStreams := []*SceneStreamEndpoint{ { URL: directStreamURL + ".webm", MimeType: &mimeWebm, @@ -159,7 +172,7 @@ func GetSceneStreamPaths(scene *models.Scene, directStreamURL string, maxStreami ret = append(ret, defaultStreams...) 
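// Editorial aside, not part of the patch: a minimal sketch of how the
// resolution gating above composes end to end. It assumes only the
// includeSceneStreamPath signature introduced in this diff; the helper
// name streamableResolutions is hypothetical, for illustration only.
func streamableResolutions(f *file.VideoFile, max models.StreamingResolutionEnum) []models.StreamingResolutionEnum {
	// Candidates ordered highest to lowest, mirroring the endpoint list above.
	candidates := []models.StreamingResolutionEnum{
		models.StreamingResolutionEnumFourK,
		models.StreamingResolutionEnumFullHd,
		models.StreamingResolutionEnumStandardHd,
		models.StreamingResolutionEnumStandard,
		models.StreamingResolutionEnumLow,
	}
	var ret []models.StreamingResolutionEnum
	for _, res := range candidates {
		// A resolution is offered only if the file's minimum dimension
		// reaches it and the configured transcode ceiling permits it.
		if includeSceneStreamPath(f, res, max) {
			ret = append(ret, res)
		}
	}
	return ret
}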
- hls := models.SceneStreamEndpoint{ + hls := SceneStreamEndpoint{ URL: directStreamURL + ".m3u8", MimeType: &mimeHLS, Label: &labelHLS, diff --git a/internal/manager/studio.go b/internal/manager/studio.go index 3b0d81ceb..6b517af6f 100644 --- a/internal/manager/studio.go +++ b/internal/manager/studio.go @@ -1,13 +1,15 @@ package manager import ( + "context" "errors" "fmt" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/studio" ) -func ValidateModifyStudio(studio models.StudioPartial, qb models.StudioReader) error { +func ValidateModifyStudio(ctx context.Context, studio models.StudioPartial, qb studio.Finder) error { if studio.ParentID == nil || !studio.ParentID.Valid { return nil } @@ -22,7 +24,7 @@ func ValidateModifyStudio(studio models.StudioPartial, qb models.StudioReader) e return errors.New("studio cannot be an ancestor of itself") } - currentStudio, err := qb.Find(int(currentParentID.Int64)) + currentStudio, err := qb.Find(ctx, int(currentParentID.Int64)) if err != nil { return fmt.Errorf("error finding parent studio: %v", err) } diff --git a/internal/manager/task_autotag.go b/internal/manager/task_autotag.go index 369d31754..62edb6597 100644 --- a/internal/manager/task_autotag.go +++ b/internal/manager/task_autotag.go @@ -19,8 +19,8 @@ import ( ) type autoTagJob struct { - txnManager models.TransactionManager - input models.AutoTagMetadataInput + txnManager Repository + input AutoTagMetadataInput cache match.Cache } @@ -40,7 +40,7 @@ func (j *autoTagJob) Execute(ctx context.Context, progress *job.Progress) { logger.Infof("Finished autotag after %s", time.Since(begin).String()) } -func (j *autoTagJob) isFileBasedAutoTag(input models.AutoTagMetadataInput) bool { +func (j *autoTagJob) isFileBasedAutoTag(input AutoTagMetadataInput) bool { const wildcard = "*" performerIds := input.Performers studioIds := input.Studios @@ -73,27 +73,28 @@ func (j *autoTagJob) autoTagSpecific(ctx context.Context, progress *job.Progress studioCount := len(studioIds) tagCount := len(tagIds) - if err := j.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - performerQuery := r.Performer() - studioQuery := r.Studio() - tagQuery := r.Tag() + if err := j.txnManager.WithTxn(ctx, func(ctx context.Context) error { + r := j.txnManager + performerQuery := r.Performer + studioQuery := r.Studio + tagQuery := r.Tag const wildcard = "*" var err error if performerCount == 1 && performerIds[0] == wildcard { - performerCount, err = performerQuery.Count() + performerCount, err = performerQuery.Count(ctx) if err != nil { return fmt.Errorf("error getting performer count: %v", err) } } if studioCount == 1 && studioIds[0] == wildcard { - studioCount, err = studioQuery.Count() + studioCount, err = studioQuery.Count(ctx) if err != nil { return fmt.Errorf("error getting studio count: %v", err) } } if tagCount == 1 && tagIds[0] == wildcard { - tagCount, err = tagQuery.Count() + tagCount, err = tagQuery.Count(ctx) if err != nil { return fmt.Errorf("error getting tag count: %v", err) } @@ -123,14 +124,14 @@ func (j *autoTagJob) autoTagPerformers(ctx context.Context, progress *job.Progre for _, performerId := range performerIds { var performers []*models.Performer - if err := j.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - performerQuery := r.Performer() + if err := j.txnManager.WithTxn(ctx, func(ctx context.Context) error { + performerQuery := j.txnManager.Performer ignoreAutoTag := false perPage := -1 if performerId == "*" { var err error - performers, _, err = 
performerQuery.Query(&models.PerformerFilterType{ + performers, _, err = performerQuery.Query(ctx, &models.PerformerFilterType{ IgnoreAutoTag: &ignoreAutoTag, }, &models.FindFilterType{ PerPage: &perPage, @@ -144,7 +145,7 @@ func (j *autoTagJob) autoTagPerformers(ctx context.Context, progress *job.Progre return fmt.Errorf("error parsing performer id %s: %s", performerId, err.Error()) } - performer, err := performerQuery.Find(performerIdInt) + performer, err := performerQuery.Find(ctx, performerIdInt) if err != nil { return fmt.Errorf("error finding performer id %s: %s", performerId, err.Error()) } @@ -161,15 +162,16 @@ func (j *autoTagJob) autoTagPerformers(ctx context.Context, progress *job.Progre return nil } - if err := j.txnManager.WithTxn(ctx, func(r models.Repository) error { - if err := autotag.PerformerScenes(performer, paths, r.Scene(), &j.cache); err != nil { - return err + if err := j.txnManager.WithTxn(ctx, func(ctx context.Context) error { + r := j.txnManager + if err := autotag.PerformerScenes(ctx, performer, paths, r.Scene, &j.cache); err != nil { + return fmt.Errorf("processing scenes: %w", err) } - if err := autotag.PerformerImages(performer, paths, r.Image(), &j.cache); err != nil { - return err + if err := autotag.PerformerImages(ctx, performer, paths, r.Image, &j.cache); err != nil { + return fmt.Errorf("processing images: %w", err) } - if err := autotag.PerformerGalleries(performer, paths, r.Gallery(), &j.cache); err != nil { - return err + if err := autotag.PerformerGalleries(ctx, performer, paths, r.Gallery, &j.cache); err != nil { + return fmt.Errorf("processing galleries: %w", err) } return nil @@ -193,16 +195,18 @@ func (j *autoTagJob) autoTagStudios(ctx context.Context, progress *job.Progress, return } + r := j.txnManager + for _, studioId := range studioIds { var studios []*models.Studio - if err := j.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - studioQuery := r.Studio() + if err := r.WithTxn(ctx, func(ctx context.Context) error { + studioQuery := r.Studio ignoreAutoTag := false perPage := -1 if studioId == "*" { var err error - studios, _, err = studioQuery.Query(&models.StudioFilterType{ + studios, _, err = studioQuery.Query(ctx, &models.StudioFilterType{ IgnoreAutoTag: &ignoreAutoTag, }, &models.FindFilterType{ PerPage: &perPage, @@ -216,7 +220,7 @@ func (j *autoTagJob) autoTagStudios(ctx context.Context, progress *job.Progress, return fmt.Errorf("error parsing studio id %s: %s", studioId, err.Error()) } - studio, err := studioQuery.Find(studioIdInt) + studio, err := studioQuery.Find(ctx, studioIdInt) if err != nil { return fmt.Errorf("error finding studio id %s: %s", studioId, err.Error()) } @@ -234,20 +238,20 @@ func (j *autoTagJob) autoTagStudios(ctx context.Context, progress *job.Progress, return nil } - if err := j.txnManager.WithTxn(ctx, func(r models.Repository) error { - aliases, err := r.Studio().GetAliases(studio.ID) + if err := j.txnManager.WithTxn(ctx, func(ctx context.Context) error { + aliases, err := r.Studio.GetAliases(ctx, studio.ID) if err != nil { - return err + return fmt.Errorf("getting studio aliases: %w", err) } - if err := autotag.StudioScenes(studio, paths, aliases, r.Scene(), &j.cache); err != nil { - return err + if err := autotag.StudioScenes(ctx, studio, paths, aliases, r.Scene, &j.cache); err != nil { + return fmt.Errorf("processing scenes: %w", err) } - if err := autotag.StudioImages(studio, paths, aliases, r.Image(), &j.cache); err != nil { - return err + if err := autotag.StudioImages(ctx, studio, 
paths, aliases, r.Image, &j.cache); err != nil { + return fmt.Errorf("processing images: %w", err) } - if err := autotag.StudioGalleries(studio, paths, aliases, r.Gallery(), &j.cache); err != nil { - return err + if err := autotag.StudioGalleries(ctx, studio, paths, aliases, r.Gallery, &j.cache); err != nil { + return fmt.Errorf("processing galleries: %w", err) } return nil @@ -271,15 +275,17 @@ func (j *autoTagJob) autoTagTags(ctx context.Context, progress *job.Progress, pa return } + r := j.txnManager + for _, tagId := range tagIds { var tags []*models.Tag - if err := j.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - tagQuery := r.Tag() + if err := j.txnManager.WithTxn(ctx, func(ctx context.Context) error { + tagQuery := r.Tag ignoreAutoTag := false perPage := -1 if tagId == "*" { var err error - tags, _, err = tagQuery.Query(&models.TagFilterType{ + tags, _, err = tagQuery.Query(ctx, &models.TagFilterType{ IgnoreAutoTag: &ignoreAutoTag, }, &models.FindFilterType{ PerPage: &perPage, @@ -293,7 +299,7 @@ func (j *autoTagJob) autoTagTags(ctx context.Context, progress *job.Progress, pa return fmt.Errorf("error parsing tag id %s: %s", tagId, err.Error()) } - tag, err := tagQuery.Find(tagIdInt) + tag, err := tagQuery.Find(ctx, tagIdInt) if err != nil { return fmt.Errorf("error finding tag id %s: %s", tagId, err.Error()) } @@ -306,20 +312,20 @@ func (j *autoTagJob) autoTagTags(ctx context.Context, progress *job.Progress, pa return nil } - if err := j.txnManager.WithTxn(ctx, func(r models.Repository) error { - aliases, err := r.Tag().GetAliases(tag.ID) + if err := j.txnManager.WithTxn(ctx, func(ctx context.Context) error { + aliases, err := r.Tag.GetAliases(ctx, tag.ID) if err != nil { - return err + return fmt.Errorf("getting tag aliases: %w", err) } - if err := autotag.TagScenes(tag, paths, aliases, r.Scene(), &j.cache); err != nil { - return err + if err := autotag.TagScenes(ctx, tag, paths, aliases, r.Scene, &j.cache); err != nil { + return fmt.Errorf("processing scenes: %w", err) } - if err := autotag.TagImages(tag, paths, aliases, r.Image(), &j.cache); err != nil { - return err + if err := autotag.TagImages(ctx, tag, paths, aliases, r.Image, &j.cache); err != nil { + return fmt.Errorf("processing images: %w", err) } - if err := autotag.TagGalleries(tag, paths, aliases, r.Gallery(), &j.cache); err != nil { - return err + if err := autotag.TagGalleries(ctx, tag, paths, aliases, r.Gallery, &j.cache); err != nil { + return fmt.Errorf("processing galleries: %w", err) } return nil @@ -345,7 +351,7 @@ type autoTagFilesTask struct { tags bool progress *job.Progress - txnManager models.TransactionManager + txnManager Repository cache *match.Cache } @@ -425,13 +431,13 @@ func (t *autoTagFilesTask) makeGalleryFilter() *models.GalleryFilterType { return ret } -func (t *autoTagFilesTask) getCount(r models.ReaderRepository) (int, error) { +func (t *autoTagFilesTask) getCount(ctx context.Context, r Repository) (int, error) { pp := 0 findFilter := &models.FindFilterType{ PerPage: &pp, } - sceneResults, err := r.Scene().Query(models.SceneQueryOptions{ + sceneResults, err := r.Scene.Query(ctx, models.SceneQueryOptions{ QueryOptions: models.QueryOptions{ FindFilter: findFilter, Count: true, @@ -439,12 +445,12 @@ func (t *autoTagFilesTask) getCount(r models.ReaderRepository) (int, error) { SceneFilter: t.makeSceneFilter(), }) if err != nil { - return 0, err + return 0, fmt.Errorf("getting scene count: %w", err) } sceneCount := sceneResults.Count - imageResults, err := 
r.Image().Query(models.ImageQueryOptions{ + imageResults, err := r.Image.Query(ctx, models.ImageQueryOptions{ QueryOptions: models.QueryOptions{ FindFilter: findFilter, Count: true, @@ -452,20 +458,20 @@ func (t *autoTagFilesTask) getCount(r models.ReaderRepository) (int, error) { ImageFilter: t.makeImageFilter(), }) if err != nil { - return 0, err + return 0, fmt.Errorf("getting image count: %w", err) } imageCount := imageResults.Count - _, galleryCount, err := r.Gallery().Query(t.makeGalleryFilter(), findFilter) + _, galleryCount, err := r.Gallery.Query(ctx, t.makeGalleryFilter(), findFilter) if err != nil { - return 0, err + return 0, fmt.Errorf("getting gallery count: %w", err) } return sceneCount + imageCount + galleryCount, nil } -func (t *autoTagFilesTask) processScenes(ctx context.Context, r models.ReaderRepository) error { +func (t *autoTagFilesTask) processScenes(ctx context.Context, r Repository) error { if job.IsCancelled(ctx) { return nil } @@ -477,9 +483,13 @@ func (t *autoTagFilesTask) processScenes(ctx context.Context, r models.ReaderRep more := true for more { - scenes, err := scene.Query(r.Scene(), sceneFilter, findFilter) - if err != nil { + var scenes []*models.Scene + if err := t.txnManager.WithTxn(ctx, func(ctx context.Context) error { + var err error + scenes, err = scene.Query(ctx, r.Scene, sceneFilter, findFilter) return err + }); err != nil { + return fmt.Errorf("querying scenes: %w", err) } for _, ss := range scenes { @@ -518,7 +528,7 @@ func (t *autoTagFilesTask) processScenes(ctx context.Context, r models.ReaderRep return nil } -func (t *autoTagFilesTask) processImages(ctx context.Context, r models.ReaderRepository) error { +func (t *autoTagFilesTask) processImages(ctx context.Context, r Repository) error { if job.IsCancelled(ctx) { return nil } @@ -530,9 +540,13 @@ func (t *autoTagFilesTask) processImages(ctx context.Context, r models.ReaderRep more := true for more { - images, err := image.Query(r.Image(), imageFilter, findFilter) - if err != nil { + var images []*models.Image + if err := t.txnManager.WithTxn(ctx, func(ctx context.Context) error { + var err error + images, err = image.Query(ctx, r.Image, imageFilter, findFilter) return err + }); err != nil { + return fmt.Errorf("querying images: %w", err) } for _, ss := range images { @@ -571,7 +585,7 @@ func (t *autoTagFilesTask) processImages(ctx context.Context, r models.ReaderRep return nil } -func (t *autoTagFilesTask) processGalleries(ctx context.Context, r models.ReaderRepository) error { +func (t *autoTagFilesTask) processGalleries(ctx context.Context, r Repository) error { if job.IsCancelled(ctx) { return nil } @@ -583,9 +597,13 @@ func (t *autoTagFilesTask) processGalleries(ctx context.Context, r models.Reader more := true for more { - galleries, _, err := r.Gallery().Query(galleryFilter, findFilter) - if err != nil { + var galleries []*models.Gallery + if err := t.txnManager.WithTxn(ctx, func(ctx context.Context) error { + var err error + galleries, _, err = r.Gallery.Query(ctx, galleryFilter, findFilter) return err + }); err != nil { + return fmt.Errorf("querying galleries: %w", err) } for _, ss := range galleries { @@ -625,43 +643,47 @@ func (t *autoTagFilesTask) processGalleries(ctx context.Context, r models.Reader } func (t *autoTagFilesTask) process(ctx context.Context) { - if err := t.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - total, err := t.getCount(r) + r := t.txnManager + if err := r.WithTxn(ctx, func(ctx context.Context) error { + total, err := t.getCount(ctx, 
t.txnManager) if err != nil { return err } t.progress.SetTotal(total) - logger.Infof("Starting autotag of %d files", total) - logger.Info("Autotagging scenes...") - if err := t.processScenes(ctx, r); err != nil { - return err - } - - logger.Info("Autotagging images...") - if err := t.processImages(ctx, r); err != nil { - return err - } - - logger.Info("Autotagging galleries...") - if err := t.processGalleries(ctx, r); err != nil { - return err - } - - if job.IsCancelled(ctx) { - logger.Info("Stopping due to user request") - } - return nil }); err != nil { - logger.Error(err.Error()) + logger.Errorf("error getting count for autotag task: %v", err) + return + } + + logger.Info("Autotagging scenes...") + if err := t.processScenes(ctx, r); err != nil { + logger.Errorf("error processing scenes: %v", err) + return + } + + logger.Info("Autotagging images...") + if err := t.processImages(ctx, r); err != nil { + logger.Errorf("error processing images: %v", err) + return + } + + logger.Info("Autotagging galleries...") + if err := t.processGalleries(ctx, r); err != nil { + logger.Errorf("error processing galleries: %v", err) + return + } + + if job.IsCancelled(ctx) { + logger.Info("Stopping due to user request") } } type autoTagSceneTask struct { - txnManager models.TransactionManager + txnManager Repository scene *models.Scene performers bool @@ -673,20 +695,21 @@ type autoTagSceneTask struct { func (t *autoTagSceneTask) Start(ctx context.Context, wg *sync.WaitGroup) { defer wg.Done() - if err := t.txnManager.WithTxn(ctx, func(r models.Repository) error { + r := t.txnManager + if err := t.txnManager.WithTxn(ctx, func(ctx context.Context) error { if t.performers { - if err := autotag.ScenePerformers(t.scene, r.Scene(), r.Performer(), t.cache); err != nil { - return fmt.Errorf("error tagging scene performers for %s: %v", t.scene.Path, err) + if err := autotag.ScenePerformers(ctx, t.scene, r.Scene, r.Performer, t.cache); err != nil { + return fmt.Errorf("error tagging scene performers for %s: %v", t.scene.DisplayName(), err) } } if t.studios { - if err := autotag.SceneStudios(t.scene, r.Scene(), r.Studio(), t.cache); err != nil { - return fmt.Errorf("error tagging scene studio for %s: %v", t.scene.Path, err) + if err := autotag.SceneStudios(ctx, t.scene, r.Scene, r.Studio, t.cache); err != nil { + return fmt.Errorf("error tagging scene studio for %s: %v", t.scene.DisplayName(), err) } } if t.tags { - if err := autotag.SceneTags(t.scene, r.Scene(), r.Tag(), t.cache); err != nil { - return fmt.Errorf("error tagging scene tags for %s: %v", t.scene.Path, err) + if err := autotag.SceneTags(ctx, t.scene, r.Scene, r.Tag, t.cache); err != nil { + return fmt.Errorf("error tagging scene tags for %s: %v", t.scene.DisplayName(), err) } } @@ -697,7 +720,7 @@ func (t *autoTagSceneTask) Start(ctx context.Context, wg *sync.WaitGroup) { } type autoTagImageTask struct { - txnManager models.TransactionManager + txnManager Repository image *models.Image performers bool @@ -709,20 +732,21 @@ type autoTagImageTask struct { func (t *autoTagImageTask) Start(ctx context.Context, wg *sync.WaitGroup) { defer wg.Done() - if err := t.txnManager.WithTxn(ctx, func(r models.Repository) error { + r := t.txnManager + if err := t.txnManager.WithTxn(ctx, func(ctx context.Context) error { if t.performers { - if err := autotag.ImagePerformers(t.image, r.Image(), r.Performer(), t.cache); err != nil { - return fmt.Errorf("error tagging image performers for %s: %v", t.image.Path, err) + if err := autotag.ImagePerformers(ctx, t.image, r.Image,
r.Performer, t.cache); err != nil { + return fmt.Errorf("error tagging image performers for %s: %v", t.image.DisplayName(), err) } } if t.studios { - if err := autotag.ImageStudios(t.image, r.Image(), r.Studio(), t.cache); err != nil { - return fmt.Errorf("error tagging image studio for %s: %v", t.image.Path, err) + if err := autotag.ImageStudios(ctx, t.image, r.Image, r.Studio, t.cache); err != nil { + return fmt.Errorf("error tagging image studio for %s: %v", t.image.DisplayName(), err) } } if t.tags { - if err := autotag.ImageTags(t.image, r.Image(), r.Tag(), t.cache); err != nil { - return fmt.Errorf("error tagging image tags for %s: %v", t.image.Path, err) + if err := autotag.ImageTags(ctx, t.image, r.Image, r.Tag, t.cache); err != nil { + return fmt.Errorf("error tagging image tags for %s: %v", t.image.DisplayName(), err) } } @@ -733,7 +757,7 @@ func (t *autoTagImageTask) Start(ctx context.Context, wg *sync.WaitGroup) { } type autoTagGalleryTask struct { - txnManager models.TransactionManager + txnManager Repository gallery *models.Gallery performers bool @@ -745,20 +769,21 @@ type autoTagGalleryTask struct { func (t *autoTagGalleryTask) Start(ctx context.Context, wg *sync.WaitGroup) { defer wg.Done() - if err := t.txnManager.WithTxn(ctx, func(r models.Repository) error { + r := t.txnManager + if err := t.txnManager.WithTxn(ctx, func(ctx context.Context) error { if t.performers { - if err := autotag.GalleryPerformers(t.gallery, r.Gallery(), r.Performer(), t.cache); err != nil { - return fmt.Errorf("error tagging gallery performers for %s: %v", t.gallery.Path.String, err) + if err := autotag.GalleryPerformers(ctx, t.gallery, r.Gallery, r.Performer, t.cache); err != nil { + return fmt.Errorf("error tagging gallery performers for %s: %v", t.gallery.DisplayName(), err) } } if t.studios { - if err := autotag.GalleryStudios(t.gallery, r.Gallery(), r.Studio(), t.cache); err != nil { - return fmt.Errorf("error tagging gallery studio for %s: %v", t.gallery.Path.String, err) + if err := autotag.GalleryStudios(ctx, t.gallery, r.Gallery, r.Studio, t.cache); err != nil { + return fmt.Errorf("error tagging gallery studio for %s: %v", t.gallery.DisplayName(), err) } } if t.tags { - if err := autotag.GalleryTags(t.gallery, r.Gallery(), r.Tag(), t.cache); err != nil { - return fmt.Errorf("error tagging gallery tags for %s: %v", t.gallery.Path.String, err) + if err := autotag.GalleryTags(ctx, t.gallery, r.Gallery, r.Tag, t.cache); err != nil { + return fmt.Errorf("error tagging gallery tags for %s: %v", t.gallery.DisplayName(), err) } } diff --git a/internal/manager/task_clean.go b/internal/manager/task_clean.go index 2853e612c..61076b15d 100644 --- a/internal/manager/task_clean.go +++ b/internal/manager/task_clean.go @@ -3,12 +3,13 @@ package manager import ( "context" "fmt" + "io/fs" "path/filepath" + "time" "github.com/stashapp/stash/internal/manager/config" "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/fsutil" - "github.com/stashapp/stash/pkg/gallery" "github.com/stashapp/stash/pkg/image" "github.com/stashapp/stash/pkg/job" "github.com/stashapp/stash/pkg/logger" @@ -17,45 +18,31 @@ import ( "github.com/stashapp/stash/pkg/scene" ) +type cleaner interface { + Clean(ctx context.Context, options file.CleanOptions, progress *job.Progress) +} + type cleanJob struct { - txnManager models.TransactionManager - input models.CleanMetadataInput - scanSubs *subscriptionManager + cleaner cleaner + txnManager Repository + input CleanMetadataInput + sceneService SceneService + imageService 
ImageService + scanSubs *subscriptionManager } func (j *cleanJob) Execute(ctx context.Context, progress *job.Progress) { logger.Infof("Starting cleaning of tracked files") + start := time.Now() if j.input.DryRun { logger.Infof("Running in Dry Mode") } - if err := j.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - total, err := j.getCount(r) - if err != nil { - return fmt.Errorf("error getting count: %w", err) - } - - progress.SetTotal(total) - - if job.IsCancelled(ctx) { - return nil - } - - if err := j.processScenes(ctx, progress, r.Scene()); err != nil { - return fmt.Errorf("error cleaning scenes: %w", err) - } - if err := j.processImages(ctx, progress, r.Image()); err != nil { - return fmt.Errorf("error cleaning images: %w", err) - } - if err := j.processGalleries(ctx, progress, r.Gallery(), r.Image()); err != nil { - return fmt.Errorf("error cleaning galleries: %w", err) - } - - return nil - }); err != nil { - logger.Error(err.Error()) - return - } + j.cleaner.Clean(ctx, file.CleanOptions{ + Paths: j.input.Paths, + DryRun: j.input.DryRun, + PathFilter: newCleanFilter(instance.Config), + }, progress) if job.IsCancelled(ctx) { logger.Info("Stopping due to user request") @@ -63,303 +50,89 @@ func (j *cleanJob) Execute(ctx context.Context, progress *job.Progress) { } j.scanSubs.notify() - logger.Info("Finished Cleaning") + elapsed := time.Since(start) + logger.Info(fmt.Sprintf("Finished Cleaning (%s)", elapsed)) } -func (j *cleanJob) getCount(r models.ReaderRepository) (int, error) { - sceneFilter := scene.PathsFilter(j.input.Paths) - sceneResult, err := r.Scene().Query(models.SceneQueryOptions{ - QueryOptions: models.QueryOptions{ - Count: true, +type cleanFilter struct { + scanFilter +} + +func newCleanFilter(c *config.Instance) *cleanFilter { + return &cleanFilter{ + scanFilter: scanFilter{ + extensionConfig: newExtensionConfig(c), + stashPaths: c.GetStashPaths(), + generatedPath: c.GetGeneratedPath(), + videoExcludeRegex: generateRegexps(c.GetExcludes()), + imageExcludeRegex: generateRegexps(c.GetImageExcludes()), }, - SceneFilter: sceneFilter, - }) - if err != nil { - return 0, err } - - imageCount, err := r.Image().QueryCount(image.PathsFilter(j.input.Paths), nil) - if err != nil { - return 0, err - } - - galleryCount, err := r.Gallery().QueryCount(gallery.PathsFilter(j.input.Paths), nil) - if err != nil { - return 0, err - } - - return sceneResult.Count + imageCount + galleryCount, nil } -func (j *cleanJob) processScenes(ctx context.Context, progress *job.Progress, qb models.SceneReader) error { - batchSize := 1000 +func (f *cleanFilter) Accept(ctx context.Context, path string, info fs.FileInfo) bool { + // #1102 - clean anything in generated path + generatedPath := f.generatedPath - findFilter := models.BatchFindFilter(batchSize) - sceneFilter := scene.PathsFilter(j.input.Paths) - sort := "path" - findFilter.Sort = &sort + var stash *config.StashConfig + fileOrFolder := "File" - var toDelete []int - - more := true - for more { - if job.IsCancelled(ctx) { - return nil - } - - scenes, err := scene.Query(qb, sceneFilter, findFilter) - if err != nil { - return fmt.Errorf("error querying for scenes: %w", err) - } - - for _, scene := range scenes { - progress.ExecuteTask(fmt.Sprintf("Assessing scene %s for clean", scene.Path), func() { - if j.shouldCleanScene(scene) { - toDelete = append(toDelete, scene.ID) - } else { - // increment progress, no further processing - progress.Increment() - } - }) - } - - if len(scenes) != batchSize { - more = false - } else { - 
*findFilter.Page++ - } + if info.IsDir() { + fileOrFolder = "Folder" + stash = getStashFromDirPath(f.stashPaths, path) + } else { + stash = getStashFromPath(f.stashPaths, path) } - if j.input.DryRun && len(toDelete) > 0 { - // add progress for scenes that would've been deleted - progress.AddProcessed(len(toDelete)) - } - - fileNamingAlgorithm := instance.Config.GetVideoFileNamingAlgorithm() - - if !j.input.DryRun && len(toDelete) > 0 { - progress.ExecuteTask(fmt.Sprintf("Cleaning %d scenes", len(toDelete)), func() { - for _, sceneID := range toDelete { - if job.IsCancelled(ctx) { - return - } - - j.deleteScene(ctx, fileNamingAlgorithm, sceneID) - - progress.Increment() - } - }) - } - - return nil -} - -func (j *cleanJob) processGalleries(ctx context.Context, progress *job.Progress, qb models.GalleryReader, iqb models.ImageReader) error { - batchSize := 1000 - - findFilter := models.BatchFindFilter(batchSize) - galleryFilter := gallery.PathsFilter(j.input.Paths) - sort := "path" - findFilter.Sort = &sort - - var toDelete []int - - more := true - for more { - if job.IsCancelled(ctx) { - return nil - } - - galleries, _, err := qb.Query(galleryFilter, findFilter) - if err != nil { - return fmt.Errorf("error querying for galleries: %w", err) - } - - for _, gallery := range galleries { - progress.ExecuteTask(fmt.Sprintf("Assessing gallery %s for clean", gallery.GetTitle()), func() { - if j.shouldCleanGallery(gallery, iqb) { - toDelete = append(toDelete, gallery.ID) - } else { - // increment progress, no further processing - progress.Increment() - } - }) - } - - if len(galleries) != batchSize { - more = false - } else { - *findFilter.Page++ - } - } - - if j.input.DryRun && len(toDelete) > 0 { - // add progress for galleries that would've been deleted - progress.AddProcessed(len(toDelete)) - } - - if !j.input.DryRun && len(toDelete) > 0 { - progress.ExecuteTask(fmt.Sprintf("Cleaning %d galleries", len(toDelete)), func() { - for _, galleryID := range toDelete { - if job.IsCancelled(ctx) { - return - } - - j.deleteGallery(ctx, galleryID) - - progress.Increment() - } - }) - } - - return nil -} - -func (j *cleanJob) processImages(ctx context.Context, progress *job.Progress, qb models.ImageReader) error { - batchSize := 1000 - - findFilter := models.BatchFindFilter(batchSize) - imageFilter := image.PathsFilter(j.input.Paths) - - // performance consideration: order by path since default ordering by - // title is slow - sortBy := "path" - findFilter.Sort = &sortBy - - var toDelete []int - - more := true - for more { - if job.IsCancelled(ctx) { - return nil - } - - images, err := image.Query(qb, imageFilter, findFilter) - if err != nil { - return fmt.Errorf("error querying for images: %w", err) - } - - for _, image := range images { - progress.ExecuteTask(fmt.Sprintf("Assessing image %s for clean", image.Path), func() { - if j.shouldCleanImage(image) { - toDelete = append(toDelete, image.ID) - } else { - // increment progress, no further processing - progress.Increment() - } - }) - } - - if len(images) != batchSize { - more = false - } else { - *findFilter.Page++ - } - } - - if j.input.DryRun && len(toDelete) > 0 { - // add progress for images that would've been deleted - progress.AddProcessed(len(toDelete)) - } - - if !j.input.DryRun && len(toDelete) > 0 { - progress.ExecuteTask(fmt.Sprintf("Cleaning %d images", len(toDelete)), func() { - for _, imageID := range toDelete { - if job.IsCancelled(ctx) { - return - } - - j.deleteImage(ctx, imageID) - - progress.Increment() - } - }) - } - - return nil -} - 
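For orientation: the batch-and-delete helpers removed above and the per-type shouldClean* checks removed below are superseded by cleanFilter, which the generic cleaner consults while walking the library; anything the filter rejects is handed to the clean handler. A minimal, self-contained sketch of that contract follows. The PathFilter shape is inferred from the CleanOptions.PathFilter field in this patch, and rootFilter is an illustrative stand-in, not stash code:

package main

import (
	"context"
	"fmt"
	"io/fs"
	"strings"
)

// PathFilter mirrors the contract cleanFilter satisfies: Accept returns
// false for entries the clean job should remove. (Assumed shape; the
// actual interface in pkg/file may differ in detail.)
type PathFilter interface {
	Accept(ctx context.Context, path string, info fs.FileInfo) bool
}

// rootFilter is a toy filter: anything outside the configured roots is
// rejected, i.e. marked to clean, analogous to getStashFromPath returning nil.
type rootFilter struct{ roots []string }

func (f rootFilter) Accept(_ context.Context, path string, _ fs.FileInfo) bool {
	for _, r := range f.roots {
		if strings.HasPrefix(path, r+"/") {
			return true
		}
	}
	return false
}

func main() {
	var filter PathFilter = rootFilter{roots: []string{"/stash/videos"}}
	// Prints false: a walker would hand this path to the clean handler.
	fmt.Println(filter.Accept(context.Background(), "/tmp/stray.mp4", nil))
}
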
-func (j *cleanJob) shouldClean(path string) bool { - // use image.FileExists for zip file checking - fileExists := image.FileExists(path) - - // #1102 - clean anything in generated path - generatedPath := config.GetInstance().GetGeneratedPath() - if !fileExists || getStashFromPath(path) == nil || fsutil.IsPathInDir(generatedPath, path) { - logger.Infof("File not found. Marking to clean: \"%s\"", path) - return true - } - - return false -} - -func (j *cleanJob) shouldCleanScene(s *models.Scene) bool { - if j.shouldClean(s.Path) { - return true - } - - stash := getStashFromPath(s.Path) - if stash.ExcludeVideo { - logger.Infof("File in stash library that excludes video. Marking to clean: \"%s\"", s.Path) - return true - } - - config := config.GetInstance() - if !fsutil.MatchExtension(s.Path, config.GetVideoExtensions()) { - logger.Infof("File extension does not match video extensions. Marking to clean: \"%s\"", s.Path) - return true - } - - if matchFile(s.Path, config.GetExcludes()) { - logger.Infof("File matched regex. Marking to clean: \"%s\"", s.Path) - return true - } - - return false -} - -func (j *cleanJob) shouldCleanGallery(g *models.Gallery, qb models.ImageReader) bool { - // never clean manually created galleries - if !g.Path.Valid { + if stash == nil { + logger.Infof("%s not in any stash library directories. Marking to clean: \"%s\"", fileOrFolder, path) return false } - path := g.Path.String - if j.shouldClean(path) { + if fsutil.IsPathInDir(generatedPath, path) { + logger.Infof("%s is in generated path. Marking to clean: \"%s\"", fileOrFolder, path) + return false + } + + if info.IsDir() { + return !f.shouldCleanFolder(path, stash) + } + + return !f.shouldCleanFile(path, info, stash) +} + +func (f *cleanFilter) shouldCleanFolder(path string, s *config.StashConfig) bool { + // only delete folders where it is excluded from everything + pathExcludeTest := path + string(filepath.Separator) + if (s.ExcludeVideo || matchFileRegex(pathExcludeTest, f.videoExcludeRegex)) && (s.ExcludeImage || matchFileRegex(pathExcludeTest, f.imageExcludeRegex)) { + logger.Infof("Folder is excluded from both video and image. Marking to clean: \"%s\"", path) return true } - stash := getStashFromPath(path) - if stash.ExcludeImage { - logger.Infof("File in stash library that excludes images. Marking to clean: \"%s\"", path) + return false +} + +func (f *cleanFilter) shouldCleanFile(path string, info fs.FileInfo, stash *config.StashConfig) bool { + switch { + case info.IsDir() || fsutil.MatchExtension(path, f.zipExt): + return f.shouldCleanGallery(path, stash) + case fsutil.MatchExtension(path, f.vidExt): + return f.shouldCleanVideoFile(path, stash) + case fsutil.MatchExtension(path, f.imgExt): + return f.shouldCleanImage(path, stash) + default: + logger.Infof("File extension does not match any media extensions. Marking to clean: \"%s\"", path) + return true + } +} + +func (f *cleanFilter) shouldCleanVideoFile(path string, stash *config.StashConfig) bool { + if stash.ExcludeVideo { + logger.Infof("File in stash library that excludes video. Marking to clean: \"%s\"", path) return true } - config := config.GetInstance() - if g.Zip { - if !fsutil.MatchExtension(path, config.GetGalleryExtensions()) { - logger.Infof("File extension does not match gallery extensions. Marking to clean: \"%s\"", path) - return true - } - - if countImagesInZip(path) == 0 { - logger.Infof("Gallery has 0 images. 
Marking to clean: \"%s\"", path) - return true - } - } else { - // folder-based - delete if it has no images - count, err := qb.CountByGalleryID(g.ID) - if err != nil { - logger.Warnf("Error trying to count gallery images for %q: %v", path, err) - return false - } - - if count == 0 { - return true - } - } - - if matchFile(path, config.GetImageExcludes()) { + if matchFileRegex(path, f.videoExcludeRegex) { logger.Infof("File matched regex. Marking to clean: \"%s\"", path) return true } @@ -367,140 +140,244 @@ func (j *cleanJob) shouldCleanGallery(g *models.Gallery, qb models.ImageReader) return false } -func (j *cleanJob) shouldCleanImage(s *models.Image) bool { - if j.shouldClean(s.Path) { - return true - } - - stash := getStashFromPath(s.Path) +func (f *cleanFilter) shouldCleanGallery(path string, stash *config.StashConfig) bool { if stash.ExcludeImage { - logger.Infof("File in stash library that excludes images. Marking to clean: \"%s\"", s.Path) + logger.Infof("File in stash library that excludes images. Marking to clean: \"%s\"", path) return true } - config := config.GetInstance() - if !fsutil.MatchExtension(s.Path, config.GetImageExtensions()) { - logger.Infof("File extension does not match image extensions. Marking to clean: \"%s\"", s.Path) - return true - } - - if matchFile(s.Path, config.GetImageExcludes()) { - logger.Infof("File matched regex. Marking to clean: \"%s\"", s.Path) + if matchFileRegex(path, f.imageExcludeRegex) { + logger.Infof("File matched regex. Marking to clean: \"%s\"", path) return true } return false } -func (j *cleanJob) deleteScene(ctx context.Context, fileNamingAlgorithm models.HashAlgorithm, sceneID int) { - fileNamingAlgo := GetInstance().Config.GetVideoFileNamingAlgorithm() +func (f *cleanFilter) shouldCleanImage(path string, stash *config.StashConfig) bool { + if stash.ExcludeImage { + logger.Infof("File in stash library that excludes images. Marking to clean: \"%s\"", path) + return true + } - fileDeleter := &scene.FileDeleter{ - Deleter: *file.NewDeleter(), + if matchFileRegex(path, f.imageExcludeRegex) { + logger.Infof("File matched regex. 
Marking to clean: \"%s\"", path) + return true + } + + return false +} + +type cleanHandler struct { + PluginCache *plugin.Cache +} + +func (h *cleanHandler) HandleFile(ctx context.Context, fileDeleter *file.Deleter, fileID file.ID) error { + if err := h.handleRelatedScenes(ctx, fileDeleter, fileID); err != nil { + return err + } + if err := h.handleRelatedGalleries(ctx, fileID); err != nil { + return err + } + if err := h.handleRelatedImages(ctx, fileDeleter, fileID); err != nil { + return err + } + + return nil +} + +func (h *cleanHandler) HandleFolder(ctx context.Context, fileDeleter *file.Deleter, folderID file.FolderID) error { + return h.deleteRelatedFolderGalleries(ctx, folderID) +} + +func (h *cleanHandler) handleRelatedScenes(ctx context.Context, fileDeleter *file.Deleter, fileID file.ID) error { + mgr := GetInstance() + sceneQB := mgr.Database.Scene + scenes, err := sceneQB.FindByFileID(ctx, fileID) + if err != nil { + return err + } + + fileNamingAlgo := mgr.Config.GetVideoFileNamingAlgorithm() + + sceneFileDeleter := &scene.FileDeleter{ + Deleter: fileDeleter, FileNamingAlgo: fileNamingAlgo, - Paths: GetInstance().Paths, + Paths: mgr.Paths, } - var s *models.Scene - if err := j.txnManager.WithTxn(ctx, func(repo models.Repository) error { - qb := repo.Scene() - var err error - s, err = qb.Find(sceneID) - if err != nil { + for _, scene := range scenes { + if err := scene.LoadFiles(ctx, sceneQB); err != nil { return err } - return scene.Destroy(s, repo, fileDeleter, true, false) - }); err != nil { - fileDeleter.Rollback() + // only delete if the scene has no other files + if len(scene.Files.List()) <= 1 { + logger.Infof("Deleting scene %q since it has no other related files", scene.DisplayName()) + if err := mgr.SceneService.Destroy(ctx, scene, sceneFileDeleter, true, false); err != nil { + return err + } - logger.Errorf("Error deleting scene from database: %s", err.Error()) - return + checksum := scene.Checksum + oshash := scene.OSHash + + mgr.PluginCache.RegisterPostHooks(ctx, mgr.Database, scene.ID, plugin.SceneDestroyPost, plugin.SceneDestroyInput{ + Checksum: checksum, + OSHash: oshash, + Path: scene.Path, + }, nil) + } else { + // set the primary file to a remaining file + var newPrimaryID file.ID + for _, f := range scene.Files.List() { + if f.ID != fileID { + newPrimaryID = f.ID + break + } + } + + if _, err := mgr.Repository.Scene.UpdatePartial(ctx, scene.ID, models.ScenePartial{ + PrimaryFileID: &newPrimaryID, + }); err != nil { + return err + } + } } - // perform the post-commit actions - fileDeleter.Commit() - - GetInstance().PluginCache.ExecutePostHooks(ctx, sceneID, plugin.SceneDestroyPost, plugin.SceneDestroyInput{ - Checksum: s.Checksum.String, - OSHash: s.OSHash.String, - Path: s.Path, - }, nil) + return nil } -func (j *cleanJob) deleteGallery(ctx context.Context, galleryID int) { - var g *models.Gallery +func (h *cleanHandler) handleRelatedGalleries(ctx context.Context, fileID file.ID) error { + mgr := GetInstance() + qb := mgr.Database.Gallery + galleries, err := qb.FindByFileID(ctx, fileID) + if err != nil { + return err + } - if err := j.txnManager.WithTxn(ctx, func(repo models.Repository) error { - qb := repo.Gallery() - - var err error - g, err = qb.Find(galleryID) - if err != nil { + for _, g := range galleries { + if err := g.LoadFiles(ctx, qb); err != nil { return err } - return qb.Destroy(galleryID) - }); err != nil { - logger.Errorf("Error deleting gallery from database: %s", err.Error()) - return + // only delete if the gallery has no other files + if 
len(g.Files.List()) <= 1 { + logger.Infof("Deleting gallery %q since it has no other related files", g.DisplayName()) + if err := qb.Destroy(ctx, g.ID); err != nil { + return err + } + + mgr.PluginCache.RegisterPostHooks(ctx, mgr.Database, g.ID, plugin.GalleryDestroyPost, plugin.GalleryDestroyInput{ + Checksum: g.Checksum(), + Path: g.Path, + }, nil) + } else { + // set the primary file to a remaining file + var newPrimaryID file.ID + for _, f := range g.Files.List() { + if f.Base().ID != fileID { + newPrimaryID = f.Base().ID + break + } + } + + if _, err := mgr.Repository.Gallery.UpdatePartial(ctx, g.ID, models.GalleryPartial{ + PrimaryFileID: &newPrimaryID, + }); err != nil { + return err + } + } } - GetInstance().PluginCache.ExecutePostHooks(ctx, galleryID, plugin.GalleryDestroyPost, plugin.GalleryDestroyInput{ - Checksum: g.Checksum, - Path: g.Path.String, - }, nil) + return nil } -func (j *cleanJob) deleteImage(ctx context.Context, imageID int) { - fileDeleter := &image.FileDeleter{ - Deleter: *file.NewDeleter(), +func (h *cleanHandler) deleteRelatedFolderGalleries(ctx context.Context, folderID file.FolderID) error { + mgr := GetInstance() + qb := mgr.Database.Gallery + galleries, err := qb.FindByFolderID(ctx, folderID) + if err != nil { + return err + } + + for _, g := range galleries { + logger.Infof("Deleting folder-based gallery %q since the folder no longer exists", g.DisplayName()) + if err := qb.Destroy(ctx, g.ID); err != nil { + return err + } + + mgr.PluginCache.RegisterPostHooks(ctx, mgr.Database, g.ID, plugin.GalleryDestroyPost, plugin.GalleryDestroyInput{ + // No checksum for folders + // Checksum: g.Checksum(), + Path: g.Path, + }, nil) + } + + return nil +} + +func (h *cleanHandler) handleRelatedImages(ctx context.Context, fileDeleter *file.Deleter, fileID file.ID) error { + mgr := GetInstance() + imageQB := mgr.Database.Image + images, err := imageQB.FindByFileID(ctx, fileID) + if err != nil { + return err + } + + imageFileDeleter := &image.FileDeleter{ + Deleter: fileDeleter, Paths: GetInstance().Paths, } - var i *models.Image - if err := j.txnManager.WithTxn(ctx, func(repo models.Repository) error { - qb := repo.Image() - - var err error - i, err = qb.Find(imageID) - if err != nil { + for _, i := range images { + if err := i.LoadFiles(ctx, imageQB); err != nil { return err } - if i == nil { - return fmt.Errorf("image not found: %d", imageID) + if len(i.Files.List()) <= 1 { + logger.Infof("Deleting image %q since it has no other related files", i.DisplayName()) + if err := mgr.ImageService.Destroy(ctx, i, imageFileDeleter, true, false); err != nil { + return err + } + + mgr.PluginCache.RegisterPostHooks(ctx, mgr.Database, i.ID, plugin.ImageDestroyPost, plugin.ImageDestroyInput{ + Checksum: i.Checksum, + Path: i.Path, + }, nil) + } else { + // set the primary file to a remaining file + var newPrimaryID file.ID + for _, f := range i.Files.List() { + if f.Base().ID != fileID { + newPrimaryID = f.Base().ID + break + } + } + + if _, err := mgr.Repository.Image.UpdatePartial(ctx, i.ID, models.ImagePartial{ + PrimaryFileID: &newPrimaryID, + }); err != nil { + return err + } } - - return image.Destroy(i, qb, fileDeleter, true, false) - }); err != nil { - fileDeleter.Rollback() - - logger.Errorf("Error deleting image from database: %s", err.Error()) - return } - // perform the post-commit actions - fileDeleter.Commit() - GetInstance().PluginCache.ExecutePostHooks(ctx, imageID, plugin.ImageDestroyPost, plugin.ImageDestroyInput{ - Checksum: i.Checksum, - Path: i.Path, - }, nil) + 
return nil } -func getStashFromPath(pathToCheck string) *models.StashConfig { - for _, s := range config.GetInstance().GetStashPaths() { - if fsutil.IsPathInDir(s.Path, filepath.Dir(pathToCheck)) { - return s +func getStashFromPath(stashes []*config.StashConfig, pathToCheck string) *config.StashConfig { + for _, f := range stashes { + if fsutil.IsPathInDir(f.Path, filepath.Dir(pathToCheck)) { + return f } } return nil } -func getStashFromDirPath(pathToCheck string) *models.StashConfig { - for _, s := range config.GetInstance().GetStashPaths() { - if fsutil.IsPathInDir(s.Path, pathToCheck) { - return s +func getStashFromDirPath(stashes []*config.StashConfig, pathToCheck string) *config.StashConfig { + for _, f := range stashes { + if fsutil.IsPathInDir(f.Path, pathToCheck) { + return f } } return nil diff --git a/internal/manager/task_export.go b/internal/manager/task_export.go index 32141f3ea..225938e70 100644 --- a/internal/manager/task_export.go +++ b/internal/manager/task_export.go @@ -8,13 +8,14 @@ import ( "os" "path/filepath" "runtime" + "strconv" "sync" "time" "github.com/stashapp/stash/internal/manager/config" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/fsutil" "github.com/stashapp/stash/pkg/gallery" - "github.com/stashapp/stash/pkg/hash/md5" "github.com/stashapp/stash/pkg/image" "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" @@ -32,13 +33,12 @@ import ( ) type ExportTask struct { - txnManager models.TransactionManager + txnManager Repository full bool baseDir string json jsonUtils - Mappings *jsonschema.Mappings fileNamingAlgorithm models.HashAlgorithm scenes *exportSpec @@ -54,12 +54,28 @@ type ExportTask struct { DownloadHash string } +type ExportObjectTypeInput struct { + Ids []string `json:"ids"` + All *bool `json:"all"` +} + +type ExportObjectsInput struct { + Scenes *ExportObjectTypeInput `json:"scenes"` + Images *ExportObjectTypeInput `json:"images"` + Studios *ExportObjectTypeInput `json:"studios"` + Performers *ExportObjectTypeInput `json:"performers"` + Tags *ExportObjectTypeInput `json:"tags"` + Movies *ExportObjectTypeInput `json:"movies"` + Galleries *ExportObjectTypeInput `json:"galleries"` + IncludeDependencies *bool `json:"includeDependencies"` +} + type exportSpec struct { IDs []int all bool } -func newExportSpec(input *models.ExportObjectTypeInput) *exportSpec { +func newExportSpec(input *ExportObjectTypeInput) *exportSpec { if input == nil { return &exportSpec{} } @@ -77,14 +93,14 @@ func newExportSpec(input *models.ExportObjectTypeInput) *exportSpec { return ret } -func CreateExportTask(a models.HashAlgorithm, input models.ExportObjectsInput) *ExportTask { +func CreateExportTask(a models.HashAlgorithm, input ExportObjectsInput) *ExportTask { includeDeps := false if input.IncludeDependencies != nil { includeDeps = *input.IncludeDependencies } return &ExportTask{ - txnManager: GetInstance().TxnManager, + txnManager: GetInstance().Repository, fileNamingAlgorithm: a, scenes: newExportSpec(input.Scenes), images: newExportSpec(input.Images), @@ -102,8 +118,6 @@ func (t *ExportTask) Start(ctx context.Context, wg *sync.WaitGroup) { // @manager.total = Scene.count + Gallery.count + Performer.count + Studio.count + Movie.count workerCount := runtime.GOMAXPROCS(0) // set worker count to number of cpus available - t.Mappings = &jsonschema.Mappings{} - startTime := time.Now() if t.full { @@ -124,36 +138,44 @@ func (t *ExportTask) Start(ctx context.Context, wg *sync.WaitGroup) { }() } + if t.baseDir == "" { + 
logger.Errorf("baseDir must not be empty") + return + } + t.json = jsonUtils{ json: *paths.GetJSONPaths(t.baseDir), } + paths.EmptyJSONDirs(t.baseDir) paths.EnsureJSONDirs(t.baseDir) - txnErr := t.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { + txnErr := t.txnManager.WithTxn(ctx, func(ctx context.Context) error { + r := t.txnManager + // include movie scenes and gallery images if !t.full { // only include movie scenes if includeDependencies is also set if !t.scenes.all && t.includeDependencies { - t.populateMovieScenes(r) + t.populateMovieScenes(ctx, r) } // always export gallery images if !t.images.all { - t.populateGalleryImages(r) + t.populateGalleryImages(ctx, r) } } - t.ExportScenes(workerCount, r) - t.ExportImages(workerCount, r) - t.ExportGalleries(workerCount, r) - t.ExportMovies(workerCount, r) - t.ExportPerformers(workerCount, r) - t.ExportStudios(workerCount, r) - t.ExportTags(workerCount, r) + t.ExportScenes(ctx, workerCount, r) + t.ExportImages(ctx, workerCount, r) + t.ExportGalleries(ctx, workerCount, r) + t.ExportMovies(ctx, workerCount, r) + t.ExportPerformers(ctx, workerCount, r) + t.ExportStudios(ctx, workerCount, r) + t.ExportTags(ctx, workerCount, r) if t.full { - t.ExportScrapedItems(r) + t.ExportScrapedItems(ctx, r) } return nil @@ -162,10 +184,6 @@ func (t *ExportTask) Start(ctx context.Context, wg *sync.WaitGroup) { logger.Warnf("error while running export transaction: %v", txnErr) } - if err := t.json.saveMappings(t.Mappings); err != nil { - logger.Errorf("[mappings] failed to save json: %s", err.Error()) - } - if !t.full { err := t.generateDownload() if err != nil { @@ -208,12 +226,6 @@ func (t *ExportTask) zipFiles(w io.Writer) error { json: *paths.GetJSONPaths(""), } - // write the mappings file - err := t.zipFile(t.json.json.MappingsFile, "", z) - if err != nil { - return err - } - walkWarn(t.json.json.Tags, t.zipWalkFunc(u.json.Tags, z)) walkWarn(t.json.json.Galleries, t.zipWalkFunc(u.json.Galleries, z)) walkWarn(t.json.json.Performers, t.zipWalkFunc(u.json.Performers, z)) @@ -268,17 +280,17 @@ func (t *ExportTask) zipFile(fn, outDir string, z *zip.Writer) error { return nil } -func (t *ExportTask) populateMovieScenes(repo models.ReaderRepository) { - reader := repo.Movie() - sceneReader := repo.Scene() +func (t *ExportTask) populateMovieScenes(ctx context.Context, repo Repository) { + reader := repo.Movie + sceneReader := repo.Scene var movies []*models.Movie var err error all := t.full || (t.movies != nil && t.movies.all) if all { - movies, err = reader.All() + movies, err = reader.All(ctx) } else if t.movies != nil && len(t.movies.IDs) > 0 { - movies, err = reader.FindMany(t.movies.IDs) + movies, err = reader.FindMany(ctx, t.movies.IDs) } if err != nil { @@ -286,7 +298,7 @@ func (t *ExportTask) populateMovieScenes(repo models.ReaderRepository) { } for _, m := range movies { - scenes, err := sceneReader.FindByMovieID(m.ID) + scenes, err := sceneReader.FindByMovieID(ctx, m.ID) if err != nil { logger.Errorf("[movies] <%s> failed to fetch scenes for movie: %s", m.Checksum, err.Error()) continue @@ -298,17 +310,17 @@ func (t *ExportTask) populateMovieScenes(repo models.ReaderRepository) { } } -func (t *ExportTask) populateGalleryImages(repo models.ReaderRepository) { - reader := repo.Gallery() - imageReader := repo.Image() +func (t *ExportTask) populateGalleryImages(ctx context.Context, repo Repository) { + reader := repo.Gallery + imageReader := repo.Image var galleries []*models.Gallery var err error all := t.full || (t.galleries != nil 
&& t.galleries.all) if all { - galleries, err = reader.All() + galleries, err = reader.All(ctx) } else if t.galleries != nil && len(t.galleries.IDs) > 0 { - galleries, err = reader.FindMany(t.galleries.IDs) + galleries, err = reader.FindMany(ctx, t.galleries.IDs) } if err != nil { @@ -316,9 +328,14 @@ func (t *ExportTask) populateGalleryImages(repo models.ReaderRepository) { } for _, g := range galleries { - images, err := imageReader.FindByGalleryID(g.ID) + if err := g.LoadFiles(ctx, reader); err != nil { + logger.Errorf("[galleries] <%s> failed to fetch files for gallery: %s", g.DisplayName(), err.Error()) + continue + } + + images, err := imageReader.FindByGalleryID(ctx, g.ID) if err != nil { - logger.Errorf("[galleries] <%s> failed to fetch images for gallery: %s", g.Checksum, err.Error()) + logger.Errorf("[galleries] <%s> failed to fetch images for gallery: %s", g.Checksum(), err.Error()) continue } @@ -328,18 +345,18 @@ func (t *ExportTask) populateGalleryImages(repo models.ReaderRepository) { } } -func (t *ExportTask) ExportScenes(workers int, repo models.ReaderRepository) { +func (t *ExportTask) ExportScenes(ctx context.Context, workers int, repo Repository) { var scenesWg sync.WaitGroup - sceneReader := repo.Scene() + sceneReader := repo.Scene var scenes []*models.Scene var err error all := t.full || (t.scenes != nil && t.scenes.all) if all { - scenes, err = sceneReader.All() + scenes, err = sceneReader.All(ctx) } else if t.scenes != nil && len(t.scenes.IDs) > 0 { - scenes, err = sceneReader.FindMany(t.scenes.IDs) + scenes, err = sceneReader.FindMany(ctx, t.scenes.IDs) } if err != nil { @@ -353,7 +370,7 @@ func (t *ExportTask) ExportScenes(workers int, repo models.ReaderRepository) { for w := 0; w < workers; w++ { // create export Scene workers scenesWg.Add(1) - go exportScene(&scenesWg, jobCh, repo, t) + go exportScene(ctx, &scenesWg, jobCh, repo, t) } for i, scene := range scenes { @@ -362,7 +379,6 @@ func (t *ExportTask) ExportScenes(workers int, repo models.ReaderRepository) { if (i % 100) == 0 { // make progress easier to read logger.Progressf("[scenes] %d of %d", index, len(scenes)) } - t.Mappings.Scenes = append(t.Mappings.Scenes, jsonschema.PathNameMapping{Path: scene.Path, Checksum: scene.GetHash(t.fileNamingAlgorithm)}) jobCh <- scene // feed workers } @@ -372,40 +388,146 @@ func (t *ExportTask) ExportScenes(workers int, repo models.ReaderRepository) { logger.Infof("[scenes] export complete in %s. 
%d workers used.", time.Since(startTime), workers) } -func exportScene(wg *sync.WaitGroup, jobChan <-chan *models.Scene, repo models.ReaderRepository, t *ExportTask) { +func exportFile(f file.File, t *ExportTask) { + newFileJSON := fileToJSON(f) + + fn := newFileJSON.Filename() + + if err := t.json.saveFile(fn, newFileJSON); err != nil { + logger.Errorf("[files] <%s> failed to save json: %s", fn, err.Error()) + } +} + +func fileToJSON(f file.File) jsonschema.DirEntry { + bf := f.Base() + + base := jsonschema.BaseFile{ + BaseDirEntry: jsonschema.BaseDirEntry{ + Type: jsonschema.DirEntryTypeFile, + ModTime: json.JSONTime{Time: bf.ModTime}, + Path: bf.Path, + CreatedAt: json.JSONTime{Time: bf.CreatedAt}, + UpdatedAt: json.JSONTime{Time: bf.UpdatedAt}, + }, + Size: bf.Size, + } + + if bf.ZipFile != nil { + base.ZipFile = bf.ZipFile.Base().Path + } + + for _, fp := range bf.Fingerprints { + base.Fingerprints = append(base.Fingerprints, jsonschema.Fingerprint{ + Type: fp.Type, + Fingerprint: fp.Fingerprint, + }) + } + + switch ff := f.(type) { + case *file.VideoFile: + base.Type = jsonschema.DirEntryTypeVideo + return jsonschema.VideoFile{ + BaseFile: &base, + Format: ff.Format, + Width: ff.Width, + Height: ff.Height, + Duration: ff.Duration, + VideoCodec: ff.VideoCodec, + AudioCodec: ff.AudioCodec, + FrameRate: ff.FrameRate, + BitRate: ff.BitRate, + Interactive: ff.Interactive, + InteractiveSpeed: ff.InteractiveSpeed, + } + case *file.ImageFile: + base.Type = jsonschema.DirEntryTypeImage + return jsonschema.ImageFile{ + BaseFile: &base, + Format: ff.Format, + Width: ff.Width, + Height: ff.Height, + } + } + + return &base +} + +func exportFolder(f file.Folder, t *ExportTask) { + newFileJSON := folderToJSON(f) + + fn := newFileJSON.Filename() + + if err := t.json.saveFile(fn, newFileJSON); err != nil { + logger.Errorf("[files] <%s> failed to save json: %s", fn, err.Error()) + } +} + +func folderToJSON(f file.Folder) jsonschema.DirEntry { + base := jsonschema.BaseDirEntry{ + Type: jsonschema.DirEntryTypeFolder, + ModTime: json.JSONTime{Time: f.ModTime}, + Path: f.Path, + CreatedAt: json.JSONTime{Time: f.CreatedAt}, + UpdatedAt: json.JSONTime{Time: f.UpdatedAt}, + } + + if f.ZipFile != nil { + base.ZipFile = f.ZipFile.Base().Path + } + + return &base +} + +func exportScene(ctx context.Context, wg *sync.WaitGroup, jobChan <-chan *models.Scene, repo Repository, t *ExportTask) { defer wg.Done() - sceneReader := repo.Scene() - studioReader := repo.Studio() - movieReader := repo.Movie() - galleryReader := repo.Gallery() - performerReader := repo.Performer() - tagReader := repo.Tag() - sceneMarkerReader := repo.SceneMarker() + sceneReader := repo.Scene + studioReader := repo.Studio + movieReader := repo.Movie + galleryReader := repo.Gallery + performerReader := repo.Performer + tagReader := repo.Tag + sceneMarkerReader := repo.SceneMarker for s := range jobChan { sceneHash := s.GetHash(t.fileNamingAlgorithm) - newSceneJSON, err := scene.ToBasicJSON(sceneReader, s) + if err := s.LoadRelationships(ctx, sceneReader); err != nil { + logger.Errorf("[scenes] <%s> error loading scene relationships: %v", sceneHash, err) + } + + newSceneJSON, err := scene.ToBasicJSON(ctx, sceneReader, s) if err != nil { logger.Errorf("[scenes] <%s> error getting scene JSON: %s", sceneHash, err.Error()) continue } - newSceneJSON.Studio, err = scene.GetStudioName(studioReader, s) + // export files + for _, f := range s.Files.List() { + exportFile(f, t) + } + + newSceneJSON.Studio, err = scene.GetStudioName(ctx, studioReader, s) if 
err != nil { logger.Errorf("[scenes] <%s> error getting scene studio name: %s", sceneHash, err.Error()) continue } - galleries, err := galleryReader.FindBySceneID(s.ID) + galleries, err := galleryReader.FindBySceneID(ctx, s.ID) if err != nil { logger.Errorf("[scenes] <%s> error getting scene gallery checksums: %s", sceneHash, err.Error()) continue } - newSceneJSON.Galleries = gallery.GetChecksums(galleries) + for _, g := range galleries { + if err := g.LoadFiles(ctx, galleryReader); err != nil { + logger.Errorf("[scenes] <%s> error getting scene gallery files: %s", sceneHash, err.Error()) + continue + } + } - performers, err := performerReader.FindBySceneID(s.ID) + newSceneJSON.Galleries = gallery.GetRefs(galleries) + + performers, err := performerReader.FindBySceneID(ctx, s.ID) if err != nil { logger.Errorf("[scenes] <%s> error getting scene performer names: %s", sceneHash, err.Error()) continue @@ -413,39 +535,39 @@ func exportScene(wg *sync.WaitGroup, jobChan <-chan *models.Scene, repo models.R newSceneJSON.Performers = performer.GetNames(performers) - newSceneJSON.Tags, err = scene.GetTagNames(tagReader, s) + newSceneJSON.Tags, err = scene.GetTagNames(ctx, tagReader, s) if err != nil { logger.Errorf("[scenes] <%s> error getting scene tag names: %s", sceneHash, err.Error()) continue } - newSceneJSON.Markers, err = scene.GetSceneMarkersJSON(sceneMarkerReader, tagReader, s) + newSceneJSON.Markers, err = scene.GetSceneMarkersJSON(ctx, sceneMarkerReader, tagReader, s) if err != nil { logger.Errorf("[scenes] <%s> error getting scene markers JSON: %s", sceneHash, err.Error()) continue } - newSceneJSON.Movies, err = scene.GetSceneMoviesJSON(movieReader, sceneReader, s) + newSceneJSON.Movies, err = scene.GetSceneMoviesJSON(ctx, movieReader, s) if err != nil { logger.Errorf("[scenes] <%s> error getting scene movies JSON: %s", sceneHash, err.Error()) continue } if t.includeDependencies { - if s.StudioID.Valid { - t.studios.IDs = intslice.IntAppendUnique(t.studios.IDs, int(s.StudioID.Int64)) + if s.StudioID != nil { + t.studios.IDs = intslice.IntAppendUnique(t.studios.IDs, *s.StudioID) } t.galleries.IDs = intslice.IntAppendUniques(t.galleries.IDs, gallery.GetIDs(galleries)) - tagIDs, err := scene.GetDependentTagIDs(tagReader, sceneMarkerReader, s) + tagIDs, err := scene.GetDependentTagIDs(ctx, tagReader, sceneMarkerReader, s) if err != nil { logger.Errorf("[scenes] <%s> error getting scene tags: %s", sceneHash, err.Error()) continue } t.tags.IDs = intslice.IntAppendUniques(t.tags.IDs, tagIDs) - movieIDs, err := scene.GetDependentMovieIDs(sceneReader, s) + movieIDs, err := scene.GetDependentMovieIDs(ctx, s) if err != nil { logger.Errorf("[scenes] <%s> error getting scene movies: %s", sceneHash, err.Error()) continue @@ -455,29 +577,29 @@ func exportScene(wg *sync.WaitGroup, jobChan <-chan *models.Scene, repo models.R t.performers.IDs = intslice.IntAppendUniques(t.performers.IDs, performer.GetIDs(performers)) } - sceneJSON, err := t.json.getScene(sceneHash) - if err == nil && jsonschema.CompareJSON(*sceneJSON, *newSceneJSON) { - continue - } + basename := filepath.Base(s.Path) + hash := s.OSHash - if err := t.json.saveScene(sceneHash, newSceneJSON); err != nil { + fn := newSceneJSON.Filename(basename, hash) + + if err := t.json.saveScene(fn, newSceneJSON); err != nil { logger.Errorf("[scenes] <%s> failed to save json: %s", sceneHash, err.Error()) } } } -func (t *ExportTask) ExportImages(workers int, repo models.ReaderRepository) { +func (t *ExportTask) ExportImages(ctx context.Context, workers int, 
repo Repository) { var imagesWg sync.WaitGroup - imageReader := repo.Image() + imageReader := repo.Image var images []*models.Image var err error all := t.full || (t.images != nil && t.images.all) if all { - images, err = imageReader.All() + images, err = imageReader.All(ctx) } else if t.images != nil && len(t.images.IDs) > 0 { - images, err = imageReader.FindMany(t.images.IDs) + images, err = imageReader.FindMany(ctx, t.images.IDs) } if err != nil { @@ -491,7 +613,7 @@ func (t *ExportTask) ExportImages(workers int, repo models.ReaderRepository) { for w := 0; w < workers; w++ { // create export Image workers imagesWg.Add(1) - go exportImage(&imagesWg, jobCh, repo, t) + go exportImage(ctx, &imagesWg, jobCh, repo, t) } for i, image := range images { @@ -500,7 +622,6 @@ func (t *ExportTask) ExportImages(workers int, repo models.ReaderRepository) { if (i % 100) == 0 { // make progress easier to read logger.Progressf("[images] %d of %d", index, len(images)) } - t.Mappings.Images = append(t.Mappings.Images, jsonschema.PathNameMapping{Path: image.Path, Checksum: image.Checksum}) jobCh <- image // feed workers } @@ -510,34 +631,51 @@ func (t *ExportTask) ExportImages(workers int, repo models.ReaderRepository) { logger.Infof("[images] export complete in %s. %d workers used.", time.Since(startTime), workers) } -func exportImage(wg *sync.WaitGroup, jobChan <-chan *models.Image, repo models.ReaderRepository, t *ExportTask) { +func exportImage(ctx context.Context, wg *sync.WaitGroup, jobChan <-chan *models.Image, repo Repository, t *ExportTask) { defer wg.Done() - studioReader := repo.Studio() - galleryReader := repo.Gallery() - performerReader := repo.Performer() - tagReader := repo.Tag() + studioReader := repo.Studio + galleryReader := repo.Gallery + performerReader := repo.Performer + tagReader := repo.Tag for s := range jobChan { imageHash := s.Checksum + if err := s.LoadFiles(ctx, repo.Image); err != nil { + logger.Errorf("[images] <%s> error getting image files: %s", imageHash, err.Error()) + continue + } + newImageJSON := image.ToBasicJSON(s) + // export files + for _, f := range s.Files.List() { + exportFile(f, t) + } + var err error - newImageJSON.Studio, err = image.GetStudioName(studioReader, s) + newImageJSON.Studio, err = image.GetStudioName(ctx, studioReader, s) if err != nil { logger.Errorf("[images] <%s> error getting image studio name: %s", imageHash, err.Error()) continue } - imageGalleries, err := galleryReader.FindByImageID(s.ID) + imageGalleries, err := galleryReader.FindByImageID(ctx, s.ID) if err != nil { logger.Errorf("[images] <%s> error getting image galleries: %s", imageHash, err.Error()) continue } - newImageJSON.Galleries = t.getGalleryChecksums(imageGalleries) + for _, g := range imageGalleries { + if err := g.LoadFiles(ctx, galleryReader); err != nil { + logger.Errorf("[images] <%s> error getting image gallery files: %s", imageHash, err.Error()) + continue + } + } - performers, err := performerReader.FindByImageID(s.ID) + newImageJSON.Galleries = gallery.GetRefs(imageGalleries) + + performers, err := performerReader.FindByImageID(ctx, s.ID) if err != nil { logger.Errorf("[images] <%s> error getting image performer names: %s", imageHash, err.Error()) continue @@ -545,7 +683,7 @@ func exportImage(wg *sync.WaitGroup, jobChan <-chan *models.Image, repo models.R newImageJSON.Performers = performer.GetNames(performers) - tags, err := tagReader.FindByImageID(s.ID) + tags, err := tagReader.FindByImageID(ctx, s.ID) if err != nil { logger.Errorf("[images] <%s> error getting image 
tag names: %s", imageHash, err.Error()) continue @@ -554,8 +692,8 @@ func exportImage(wg *sync.WaitGroup, jobChan <-chan *models.Image, repo models.R newImageJSON.Tags = tag.GetNames(tags) if t.includeDependencies { - if s.StudioID.Valid { - t.studios.IDs = intslice.IntAppendUnique(t.studios.IDs, int(s.StudioID.Int64)) + if s.StudioID != nil { + t.studios.IDs = intslice.IntAppendUnique(t.studios.IDs, *s.StudioID) } t.galleries.IDs = intslice.IntAppendUniques(t.galleries.IDs, gallery.GetIDs(imageGalleries)) @@ -563,36 +701,26 @@ func exportImage(wg *sync.WaitGroup, jobChan <-chan *models.Image, repo models.R t.performers.IDs = intslice.IntAppendUniques(t.performers.IDs, performer.GetIDs(performers)) } - imageJSON, err := t.json.getImage(imageHash) - if err == nil && jsonschema.CompareJSON(*imageJSON, *newImageJSON) { - continue - } + fn := newImageJSON.Filename(filepath.Base(s.Path), s.Checksum) - if err := t.json.saveImage(imageHash, newImageJSON); err != nil { + if err := t.json.saveImage(fn, newImageJSON); err != nil { logger.Errorf("[images] <%s> failed to save json: %s", imageHash, err.Error()) } } } -func (t *ExportTask) getGalleryChecksums(galleries []*models.Gallery) (ret []string) { - for _, g := range galleries { - ret = append(ret, g.Checksum) - } - return -} - -func (t *ExportTask) ExportGalleries(workers int, repo models.ReaderRepository) { +func (t *ExportTask) ExportGalleries(ctx context.Context, workers int, repo Repository) { var galleriesWg sync.WaitGroup - reader := repo.Gallery() + reader := repo.Gallery var galleries []*models.Gallery var err error all := t.full || (t.galleries != nil && t.galleries.all) if all { - galleries, err = reader.All() + galleries, err = reader.All(ctx) } else if t.galleries != nil && len(t.galleries.IDs) > 0 { - galleries, err = reader.FindMany(t.galleries.IDs) + galleries, err = reader.FindMany(ctx, t.galleries.IDs) } if err != nil { @@ -606,7 +734,7 @@ func (t *ExportTask) ExportGalleries(workers int, repo models.ReaderRepository) for w := 0; w < workers; w++ { // create export Scene workers galleriesWg.Add(1) - go exportGallery(&galleriesWg, jobCh, repo, t) + go exportGallery(ctx, &galleriesWg, jobCh, repo, t) } for i, gallery := range galleries { @@ -616,11 +744,6 @@ func (t *ExportTask) ExportGalleries(workers int, repo models.ReaderRepository) logger.Progressf("[galleries] %d of %d", index, len(galleries)) } - t.Mappings.Galleries = append(t.Mappings.Galleries, jsonschema.PathNameMapping{ - Path: gallery.Path.String, - Name: gallery.Title.String, - Checksum: gallery.Checksum, - }) jobCh <- gallery } @@ -630,14 +753,19 @@ func (t *ExportTask) ExportGalleries(workers int, repo models.ReaderRepository) logger.Infof("[galleries] export complete in %s. 
%d workers used.", time.Since(startTime), workers) } -func exportGallery(wg *sync.WaitGroup, jobChan <-chan *models.Gallery, repo models.ReaderRepository, t *ExportTask) { +func exportGallery(ctx context.Context, wg *sync.WaitGroup, jobChan <-chan *models.Gallery, repo Repository, t *ExportTask) { defer wg.Done() - studioReader := repo.Studio() - performerReader := repo.Performer() - tagReader := repo.Tag() + studioReader := repo.Studio + performerReader := repo.Performer + tagReader := repo.Tag for g := range jobChan { - galleryHash := g.Checksum + if err := g.LoadFiles(ctx, repo.Gallery); err != nil { + logger.Errorf("[galleries] <%s> failed to fetch files for gallery: %s", g.DisplayName(), err.Error()) + continue + } + + galleryHash := g.Checksum() newGalleryJSON, err := gallery.ToBasicJSON(g) if err != nil { @@ -645,13 +773,34 @@ func exportGallery(wg *sync.WaitGroup, jobChan <-chan *models.Gallery, repo mode continue } - newGalleryJSON.Studio, err = gallery.GetStudioName(studioReader, g) + // export files + for _, f := range g.Files.List() { + exportFile(f, t) + } + + // export folder if necessary + if g.FolderID != nil { + folder, err := repo.Folder.Find(ctx, *g.FolderID) + if err != nil { + logger.Errorf("[galleries] <%s> error getting gallery folder: %v", galleryHash, err) + continue + } + + if folder == nil { + logger.Errorf("[galleries] <%s> unable to find gallery folder", galleryHash) + continue + } + + exportFolder(*folder, t) + } + + newGalleryJSON.Studio, err = gallery.GetStudioName(ctx, studioReader, g) if err != nil { logger.Errorf("[galleries] <%s> error getting gallery studio name: %s", galleryHash, err.Error()) continue } - performers, err := performerReader.FindByGalleryID(g.ID) + performers, err := performerReader.FindByGalleryID(ctx, g.ID) if err != nil { logger.Errorf("[galleries] <%s> error getting gallery performer names: %s", galleryHash, err.Error()) continue @@ -659,7 +808,7 @@ func exportGallery(wg *sync.WaitGroup, jobChan <-chan *models.Gallery, repo mode newGalleryJSON.Performers = performer.GetNames(performers) - tags, err := tagReader.FindByGalleryID(g.ID) + tags, err := tagReader.FindByGalleryID(ctx, g.ID) if err != nil { logger.Errorf("[galleries] <%s> error getting gallery tag names: %s", galleryHash, err.Error()) continue @@ -668,36 +817,44 @@ func exportGallery(wg *sync.WaitGroup, jobChan <-chan *models.Gallery, repo mode newGalleryJSON.Tags = tag.GetNames(tags) if t.includeDependencies { - if g.StudioID.Valid { - t.studios.IDs = intslice.IntAppendUnique(t.studios.IDs, int(g.StudioID.Int64)) + if g.StudioID != nil { + t.studios.IDs = intslice.IntAppendUnique(t.studios.IDs, *g.StudioID) } t.tags.IDs = intslice.IntAppendUniques(t.tags.IDs, tag.GetIDs(tags)) t.performers.IDs = intslice.IntAppendUniques(t.performers.IDs, performer.GetIDs(performers)) } - galleryJSON, err := t.json.getGallery(galleryHash) - if err == nil && jsonschema.CompareJSON(*galleryJSON, *newGalleryJSON) { - continue + basename := "" + // use id in case multiple galleries with the same basename + hash := strconv.Itoa(g.ID) + + switch { + case g.Path != "": + basename = filepath.Base(g.Path) + default: + basename = g.Title } - if err := t.json.saveGallery(galleryHash, newGalleryJSON); err != nil { + fn := newGalleryJSON.Filename(basename, hash) + + if err := t.json.saveGallery(fn, newGalleryJSON); err != nil { logger.Errorf("[galleries] <%s> failed to save json: %s", galleryHash, err.Error()) } } } -func (t *ExportTask) ExportPerformers(workers int, repo models.ReaderRepository) { 
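The old and new ExportPerformers signatures around this point show the mechanical change applied to every exporter in this file: ctx is threaded through, models.ReaderRepository becomes the concrete Repository, reader getters (repo.Performer()) become fields (repo.Performer), and every query takes ctx. A runnable miniature of the underlying idea, where WithTxn carries the transaction in the context instead of handing back a repository, is sketched below; txnKey and the store shape are illustrative, not stash's actual types:

package main

import (
	"context"
	"fmt"
)

type txnKey struct{}

// TagStore stands in for the repository's per-entity stores; a real store
// would pull the active transaction out of ctx before querying.
type TagStore struct{}

func (TagStore) All(ctx context.Context) ([]string, error) {
	if ctx.Value(txnKey{}) == nil {
		return nil, fmt.Errorf("not inside a transaction")
	}
	return []string{"tag1", "tag2"}, nil
}

// Repository is long-lived and shared; transaction state lives in ctx.
type Repository struct {
	Tag TagStore
}

func (r Repository) WithTxn(ctx context.Context, fn func(ctx context.Context) error) error {
	ctx = context.WithValue(ctx, txnKey{}, struct{}{}) // begin
	return fn(ctx)                                     // commit/rollback elided
}

func main() {
	r := Repository{}
	_ = r.WithTxn(context.Background(), func(ctx context.Context) error {
		tags, err := r.Tag.All(ctx) // reader is a field; query takes ctx
		fmt.Println(tags, err)
		return err
	})
}
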
+func (t *ExportTask) ExportPerformers(ctx context.Context, workers int, repo Repository) { var performersWg sync.WaitGroup - reader := repo.Performer() + reader := repo.Performer var performers []*models.Performer var err error all := t.full || (t.performers != nil && t.performers.all) if all { - performers, err = reader.All() + performers, err = reader.All(ctx) } else if t.performers != nil && len(t.performers.IDs) > 0 { - performers, err = reader.FindMany(t.performers.IDs) + performers, err = reader.FindMany(ctx, t.performers.IDs) } if err != nil { @@ -710,14 +867,13 @@ func (t *ExportTask) ExportPerformers(workers int, repo models.ReaderRepository) for w := 0; w < workers; w++ { // create export Performer workers performersWg.Add(1) - go t.exportPerformer(&performersWg, jobCh, repo) + go t.exportPerformer(ctx, &performersWg, jobCh, repo) } for i, performer := range performers { index := i + 1 logger.Progressf("[performers] %d of %d", index, len(performers)) - t.Mappings.Performers = append(t.Mappings.Performers, jsonschema.PathNameMapping{Name: performer.Name.String, Checksum: performer.Checksum}) jobCh <- performer // feed workers } @@ -727,20 +883,20 @@ func (t *ExportTask) ExportPerformers(workers int, repo models.ReaderRepository) logger.Infof("[performers] export complete in %s. %d workers used.", time.Since(startTime), workers) } -func (t *ExportTask) exportPerformer(wg *sync.WaitGroup, jobChan <-chan *models.Performer, repo models.ReaderRepository) { +func (t *ExportTask) exportPerformer(ctx context.Context, wg *sync.WaitGroup, jobChan <-chan *models.Performer, repo Repository) { defer wg.Done() - performerReader := repo.Performer() + performerReader := repo.Performer for p := range jobChan { - newPerformerJSON, err := performer.ToJSON(performerReader, p) + newPerformerJSON, err := performer.ToJSON(ctx, performerReader, p) if err != nil { logger.Errorf("[performers] <%s> error getting performer JSON: %s", p.Checksum, err.Error()) continue } - tags, err := repo.Tag().FindByPerformerID(p.ID) + tags, err := repo.Tag.FindByPerformerID(ctx, p.ID) if err != nil { logger.Errorf("[performers] <%s> error getting performer tags: %s", p.Checksum, err.Error()) continue @@ -752,30 +908,25 @@ func (t *ExportTask) exportPerformer(wg *sync.WaitGroup, jobChan <-chan *models. 
t.tags.IDs = intslice.IntAppendUniques(t.tags.IDs, tag.GetIDs(tags)) } - performerJSON, err := t.json.getPerformer(p.Checksum) - if err != nil { - logger.Debugf("[performers] error reading performer json: %s", err.Error()) - } else if jsonschema.CompareJSON(*performerJSON, *newPerformerJSON) { - continue - } + fn := newPerformerJSON.Filename() - if err := t.json.savePerformer(p.Checksum, newPerformerJSON); err != nil { + if err := t.json.savePerformer(fn, newPerformerJSON); err != nil { logger.Errorf("[performers] <%s> failed to save json: %s", p.Checksum, err.Error()) } } } -func (t *ExportTask) ExportStudios(workers int, repo models.ReaderRepository) { +func (t *ExportTask) ExportStudios(ctx context.Context, workers int, repo Repository) { var studiosWg sync.WaitGroup - reader := repo.Studio() + reader := repo.Studio var studios []*models.Studio var err error all := t.full || (t.studios != nil && t.studios.all) if all { - studios, err = reader.All() + studios, err = reader.All(ctx) } else if t.studios != nil && len(t.studios.IDs) > 0 { - studios, err = reader.FindMany(t.studios.IDs) + studios, err = reader.FindMany(ctx, t.studios.IDs) } if err != nil { @@ -789,14 +940,13 @@ func (t *ExportTask) ExportStudios(workers int, repo models.ReaderRepository) { for w := 0; w < workers; w++ { // create export Studio workers studiosWg.Add(1) - go t.exportStudio(&studiosWg, jobCh, repo) + go t.exportStudio(ctx, &studiosWg, jobCh, repo) } for i, studio := range studios { index := i + 1 logger.Progressf("[studios] %d of %d", index, len(studios)) - t.Mappings.Studios = append(t.Mappings.Studios, jsonschema.PathNameMapping{Name: studio.Name.String, Checksum: studio.Checksum}) jobCh <- studio // feed workers } @@ -806,41 +956,38 @@ func (t *ExportTask) ExportStudios(workers int, repo models.ReaderRepository) { logger.Infof("[studios] export complete in %s. 
%d workers used.", time.Since(startTime), workers) } -func (t *ExportTask) exportStudio(wg *sync.WaitGroup, jobChan <-chan *models.Studio, repo models.ReaderRepository) { +func (t *ExportTask) exportStudio(ctx context.Context, wg *sync.WaitGroup, jobChan <-chan *models.Studio, repo Repository) { defer wg.Done() - studioReader := repo.Studio() + studioReader := repo.Studio for s := range jobChan { - newStudioJSON, err := studio.ToJSON(studioReader, s) + newStudioJSON, err := studio.ToJSON(ctx, studioReader, s) if err != nil { logger.Errorf("[studios] <%s> error getting studio JSON: %s", s.Checksum, err.Error()) continue } - studioJSON, err := t.json.getStudio(s.Checksum) - if err == nil && jsonschema.CompareJSON(*studioJSON, *newStudioJSON) { - continue - } + fn := newStudioJSON.Filename() - if err := t.json.saveStudio(s.Checksum, newStudioJSON); err != nil { + if err := t.json.saveStudio(fn, newStudioJSON); err != nil { logger.Errorf("[studios] <%s> failed to save json: %s", s.Checksum, err.Error()) } } } -func (t *ExportTask) ExportTags(workers int, repo models.ReaderRepository) { +func (t *ExportTask) ExportTags(ctx context.Context, workers int, repo Repository) { var tagsWg sync.WaitGroup - reader := repo.Tag() + reader := repo.Tag var tags []*models.Tag var err error all := t.full || (t.tags != nil && t.tags.all) if all { - tags, err = reader.All() + tags, err = reader.All(ctx) } else if t.tags != nil && len(t.tags.IDs) > 0 { - tags, err = reader.FindMany(t.tags.IDs) + tags, err = reader.FindMany(ctx, t.tags.IDs) } if err != nil { @@ -854,17 +1001,13 @@ func (t *ExportTask) ExportTags(workers int, repo models.ReaderRepository) { for w := 0; w < workers; w++ { // create export Tag workers tagsWg.Add(1) - go t.exportTag(&tagsWg, jobCh, repo) + go t.exportTag(ctx, &tagsWg, jobCh, repo) } for i, tag := range tags { index := i + 1 logger.Progressf("[tags] %d of %d", index, len(tags)) - // generate checksum on the fly by name, since we don't store it - checksum := md5.FromString(tag.Name) - - t.Mappings.Tags = append(t.Mappings.Tags, jsonschema.PathNameMapping{Name: tag.Name, Checksum: checksum}) jobCh <- tag // feed workers } @@ -874,44 +1017,38 @@ func (t *ExportTask) ExportTags(workers int, repo models.ReaderRepository) { logger.Infof("[tags] export complete in %s. 
%d workers used.", time.Since(startTime), workers) } -func (t *ExportTask) exportTag(wg *sync.WaitGroup, jobChan <-chan *models.Tag, repo models.ReaderRepository) { +func (t *ExportTask) exportTag(ctx context.Context, wg *sync.WaitGroup, jobChan <-chan *models.Tag, repo Repository) { defer wg.Done() - tagReader := repo.Tag() + tagReader := repo.Tag for thisTag := range jobChan { - newTagJSON, err := tag.ToJSON(tagReader, thisTag) + newTagJSON, err := tag.ToJSON(ctx, tagReader, thisTag) if err != nil { logger.Errorf("[tags] <%s> error getting tag JSON: %s", thisTag.Name, err.Error()) continue } - // generate checksum on the fly by name, since we don't store it - checksum := md5.FromString(thisTag.Name) + fn := newTagJSON.Filename() - tagJSON, err := t.json.getTag(checksum) - if err == nil && jsonschema.CompareJSON(*tagJSON, *newTagJSON) { - continue - } - - if err := t.json.saveTag(checksum, newTagJSON); err != nil { - logger.Errorf("[tags] <%s> failed to save json: %s", checksum, err.Error()) + if err := t.json.saveTag(fn, newTagJSON); err != nil { + logger.Errorf("[tags] <%s> failed to save json: %s", fn, err.Error()) } } } -func (t *ExportTask) ExportMovies(workers int, repo models.ReaderRepository) { +func (t *ExportTask) ExportMovies(ctx context.Context, workers int, repo Repository) { var moviesWg sync.WaitGroup - reader := repo.Movie() + reader := repo.Movie var movies []*models.Movie var err error all := t.full || (t.movies != nil && t.movies.all) if all { - movies, err = reader.All() + movies, err = reader.All(ctx) } else if t.movies != nil && len(t.movies.IDs) > 0 { - movies, err = reader.FindMany(t.movies.IDs) + movies, err = reader.FindMany(ctx, t.movies.IDs) } if err != nil { @@ -925,14 +1062,13 @@ func (t *ExportTask) ExportMovies(workers int, repo models.ReaderRepository) { for w := 0; w < workers; w++ { // create export Studio workers moviesWg.Add(1) - go t.exportMovie(&moviesWg, jobCh, repo) + go t.exportMovie(ctx, &moviesWg, jobCh, repo) } for i, movie := range movies { index := i + 1 logger.Progressf("[movies] %d of %d", index, len(movies)) - t.Mappings.Movies = append(t.Mappings.Movies, jsonschema.PathNameMapping{Name: movie.Name.String, Checksum: movie.Checksum}) jobCh <- movie // feed workers } @@ -942,14 +1078,14 @@ func (t *ExportTask) ExportMovies(workers int, repo models.ReaderRepository) { logger.Infof("[movies] export complete in %s. 
%d workers used.", time.Since(startTime), workers) } -func (t *ExportTask) exportMovie(wg *sync.WaitGroup, jobChan <-chan *models.Movie, repo models.ReaderRepository) { +func (t *ExportTask) exportMovie(ctx context.Context, wg *sync.WaitGroup, jobChan <-chan *models.Movie, repo Repository) { defer wg.Done() - movieReader := repo.Movie() - studioReader := repo.Studio() + movieReader := repo.Movie + studioReader := repo.Studio for m := range jobChan { - newMovieJSON, err := movie.ToJSON(movieReader, studioReader, m) + newMovieJSON, err := movie.ToJSON(ctx, movieReader, studioReader, m) if err != nil { logger.Errorf("[movies] <%s> error getting tag JSON: %s", m.Checksum, err.Error()) @@ -962,23 +1098,18 @@ func (t *ExportTask) exportMovie(wg *sync.WaitGroup, jobChan <-chan *models.Movi } } - movieJSON, err := t.json.getMovie(m.Checksum) - if err != nil { - logger.Debugf("[movies] error reading movie json: %s", err.Error()) - } else if jsonschema.CompareJSON(*movieJSON, *newMovieJSON) { - continue - } + fn := newMovieJSON.Filename() - if err := t.json.saveMovie(m.Checksum, newMovieJSON); err != nil { - logger.Errorf("[movies] <%s> failed to save json: %s", m.Checksum, err.Error()) + if err := t.json.saveMovie(fn, newMovieJSON); err != nil { + logger.Errorf("[movies] <%s> failed to save json: %s", fn, err.Error()) } } } -func (t *ExportTask) ExportScrapedItems(repo models.ReaderRepository) { - qb := repo.ScrapedItem() - sqb := repo.Studio() - scrapedItems, err := qb.All() +func (t *ExportTask) ExportScrapedItems(ctx context.Context, repo Repository) { + qb := repo.ScrapedItem + sqb := repo.Studio + scrapedItems, err := qb.All(ctx) if err != nil { logger.Errorf("[scraped sites] failed to fetch all items: %s", err.Error()) } @@ -993,7 +1124,7 @@ func (t *ExportTask) ExportScrapedItems(repo models.ReaderRepository) { var studioName string if scrapedItem.StudioID.Valid { - studio, _ := sqb.Find(int(scrapedItem.StudioID.Int64)) + studio, _ := sqb.Find(ctx, int(scrapedItem.StudioID.Int64)) if studio != nil { studioName = studio.Name.String } diff --git a/internal/manager/task_generate.go b/internal/manager/task_generate.go index 3addf7bec..e75f51960 100644 --- a/internal/manager/task_generate.go +++ b/internal/manager/task_generate.go @@ -2,7 +2,6 @@ package manager import ( "context" - "errors" "fmt" "time" @@ -17,11 +16,45 @@ import ( "github.com/stashapp/stash/pkg/utils" ) +type GenerateMetadataInput struct { + Sprites *bool `json:"sprites"` + Previews *bool `json:"previews"` + ImagePreviews *bool `json:"imagePreviews"` + PreviewOptions *GeneratePreviewOptionsInput `json:"previewOptions"` + Markers *bool `json:"markers"` + MarkerImagePreviews *bool `json:"markerImagePreviews"` + MarkerScreenshots *bool `json:"markerScreenshots"` + Transcodes *bool `json:"transcodes"` + // Generate transcodes even if not required + ForceTranscodes *bool `json:"forceTranscodes"` + Phashes *bool `json:"phashes"` + InteractiveHeatmapsSpeeds *bool `json:"interactiveHeatmapsSpeeds"` + // scene ids to generate for + SceneIDs []string `json:"sceneIDs"` + // marker ids to generate for + MarkerIDs []string `json:"markerIDs"` + // overwrite existing media + Overwrite *bool `json:"overwrite"` +} + +type GeneratePreviewOptionsInput struct { + // Number of segments in a preview file + PreviewSegments *int `json:"previewSegments"` + // Preview segment duration, in seconds + PreviewSegmentDuration *float64 `json:"previewSegmentDuration"` + // Duration of start of video to exclude when generating previews + PreviewExcludeStart 
*string `json:"previewExcludeStart"` + // Duration of end of video to exclude when generating previews + PreviewExcludeEnd *string `json:"previewExcludeEnd"` + // Preset when generating preview + PreviewPreset *models.PreviewPreset `json:"previewPreset"` +} + const generateQueueSize = 200000 type GenerateJob struct { - txnManager models.TransactionManager - input models.GenerateMetadataInput + txnManager Repository + input GenerateMetadataInput overwrite bool fileNamingAlgo models.HashAlgorithm @@ -76,20 +109,24 @@ func (j *GenerateJob) Execute(ctx context.Context, progress *job.Progress) { Overwrite: j.overwrite, } - if err := j.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - qb := r.Scene() + if err := j.txnManager.WithTxn(ctx, func(ctx context.Context) error { + qb := j.txnManager.Scene if len(j.input.SceneIDs) == 0 && len(j.input.MarkerIDs) == 0 { totals = j.queueTasks(ctx, g, queue) } else { if len(j.input.SceneIDs) > 0 { - scenes, err = qb.FindMany(sceneIDs) + scenes, err = qb.FindMany(ctx, sceneIDs) for _, s := range scenes { + if err := s.LoadFiles(ctx, qb); err != nil { + return err + } + j.queueSceneJobs(ctx, g, s, queue, &totals) } } if len(j.input.MarkerIDs) > 0 { - markers, err = r.SceneMarker().FindMany(markerIDs) + markers, err = j.txnManager.SceneMarker.FindMany(ctx, markerIDs) if err != nil { return err } @@ -158,43 +195,41 @@ func (j *GenerateJob) queueTasks(ctx context.Context, g *generate.Generator, que findFilter := models.BatchFindFilter(batchSize) - if err := j.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - for more := true; more; { - if job.IsCancelled(ctx) { - return context.Canceled - } - - scenes, err := scene.Query(r.Scene(), nil, findFilter) - if err != nil { - return err - } - - for _, ss := range scenes { - if job.IsCancelled(ctx) { - return context.Canceled - } - - j.queueSceneJobs(ctx, g, ss, queue, &totals) - } - - if len(scenes) != batchSize { - more = false - } else { - *findFilter.Page++ - } + for more := true; more; { + if job.IsCancelled(ctx) { + return totals } - return nil - }); err != nil { - if !errors.Is(err, context.Canceled) { + scenes, err := scene.Query(ctx, j.txnManager.Scene, nil, findFilter) + if err != nil { logger.Errorf("Error encountered queuing files to scan: %s", err.Error()) + return totals + } + + for _, ss := range scenes { + if job.IsCancelled(ctx) { + return totals + } + + if err := ss.LoadFiles(ctx, j.txnManager.Scene); err != nil { + logger.Errorf("Error encountered queuing files to scan: %s", err.Error()) + return totals + } + + j.queueSceneJobs(ctx, g, ss, queue, &totals) + } + + if len(scenes) != batchSize { + more = false + } else { + *findFilter.Page++ } } return totals } -func getGeneratePreviewOptions(optionsInput models.GeneratePreviewOptionsInput) generate.PreviewOptions { +func getGeneratePreviewOptions(optionsInput GeneratePreviewOptionsInput) generate.PreviewOptions { config := config.GetInstance() ret := generate.PreviewOptions{ @@ -246,12 +281,11 @@ func (j *GenerateJob) queueSceneJobs(ctx context.Context, g *generate.Generator, generatePreviewOptions := j.input.PreviewOptions if generatePreviewOptions == nil { - generatePreviewOptions = &models.GeneratePreviewOptionsInput{} + generatePreviewOptions = &GeneratePreviewOptionsInput{} } options := getGeneratePreviewOptions(*generatePreviewOptions) if utils.IsTrue(j.input.Previews) { - task := &GeneratePreviewTask{ Scene: *scene, ImagePreview: utils.IsTrue(j.input.ImagePreviews), @@ -317,17 +351,21 @@ func (j *GenerateJob) 
queueSceneJobs(ctx context.Context, g *generate.Generator, } if utils.IsTrue(j.input.Phashes) { - task := &GeneratePhashTask{ - Scene: *scene, - fileNamingAlgorithm: j.fileNamingAlgo, - txnManager: j.txnManager, - Overwrite: j.overwrite, - } + // generate for all files in scene + for _, f := range scene.Files.List() { + task := &GeneratePhashTask{ + File: f, + fileNamingAlgorithm: j.fileNamingAlgo, + txnManager: j.txnManager, + fileUpdater: j.txnManager.File, + Overwrite: j.overwrite, + } - if task.shouldGenerate() { - totals.phashes++ - totals.tasks++ - queue <- task + if task.shouldGenerate() { + totals.phashes++ + totals.tasks++ + queue <- task + } } } diff --git a/internal/manager/task_generate_interactive_heatmap_speed.go b/internal/manager/task_generate_interactive_heatmap_speed.go index f6ca0a04e..27c780764 100644 --- a/internal/manager/task_generate_interactive_heatmap_speed.go +++ b/internal/manager/task_generate_interactive_heatmap_speed.go @@ -2,20 +2,19 @@ package manager import ( "context" - "database/sql" "fmt" + "github.com/stashapp/stash/pkg/file/video" "github.com/stashapp/stash/pkg/fsutil" "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" - "github.com/stashapp/stash/pkg/scene" ) type GenerateInteractiveHeatmapSpeedTask struct { Scene models.Scene Overwrite bool fileNamingAlgorithm models.HashAlgorithm - TxnManager models.TransactionManager + TxnManager Repository } func (t *GenerateInteractiveHeatmapSpeedTask) GetDescription() string { @@ -28,7 +27,7 @@ func (t *GenerateInteractiveHeatmapSpeedTask) Start(ctx context.Context) { } videoChecksum := t.Scene.GetHash(t.fileNamingAlgorithm) - funscriptPath := scene.GetFunscriptPath(t.Scene.Path) + funscriptPath := video.GetFunscriptPath(t.Scene.Path) heatmapPath := instance.Paths.Scene.GetInteractiveHeatmapPath(videoChecksum) generator := NewInteractiveHeatmapSpeedGenerator(funscriptPath, heatmapPath) @@ -40,30 +39,13 @@ func (t *GenerateInteractiveHeatmapSpeedTask) Start(ctx context.Context) { return } - median := sql.NullInt64{ - Int64: generator.InteractiveSpeed, - Valid: true, - } + median := generator.InteractiveSpeed - var s *models.Scene - - if err := t.TxnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - var err error - s, err = r.Scene().FindByPath(t.Scene.Path) - return err - }); err != nil { - logger.Error(err.Error()) - return - } - - if err := t.TxnManager.WithTxn(ctx, func(r models.Repository) error { - qb := r.Scene() - scenePartial := models.ScenePartial{ - ID: s.ID, - InteractiveSpeed: &median, - } - _, err := qb.Update(scenePartial) - return err + if err := t.TxnManager.WithTxn(ctx, func(ctx context.Context) error { + primaryFile := t.Scene.Files.Primary() + primaryFile.InteractiveSpeed = &median + qb := t.TxnManager.File + return qb.Update(ctx, primaryFile) }); err != nil { logger.Error(err.Error()) } @@ -71,7 +53,8 @@ func (t *GenerateInteractiveHeatmapSpeedTask) Start(ctx context.Context) { } func (t *GenerateInteractiveHeatmapSpeedTask) shouldGenerate() bool { - if !t.Scene.Interactive { + primaryFile := t.Scene.Files.Primary() + if primaryFile == nil || !primaryFile.Interactive { return false } sceneHash := t.Scene.GetHash(t.fileNamingAlgorithm) diff --git a/internal/manager/task_generate_markers.go b/internal/manager/task_generate_markers.go index 3ef53ddd0..aca8dcb2c 100644 --- a/internal/manager/task_generate_markers.go +++ b/internal/manager/task_generate_markers.go @@ -13,7 +13,7 @@ import ( ) type GenerateMarkersTask struct { - TxnManager 
models.TransactionManager + TxnManager Repository Scene *models.Scene Marker *models.SceneMarker Overwrite bool @@ -42,9 +42,9 @@ func (t *GenerateMarkersTask) Start(ctx context.Context) { if t.Marker != nil { var scene *models.Scene - if err := t.TxnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { + if err := t.TxnManager.WithTxn(ctx, func(ctx context.Context) error { var err error - scene, err = r.Scene().Find(int(t.Marker.SceneID.Int64)) + scene, err = t.TxnManager.Scene.Find(ctx, int(t.Marker.SceneID.Int64)) return err }); err != nil { logger.Errorf("error finding scene for marker: %s", err.Error()) @@ -69,9 +69,9 @@ func (t *GenerateMarkersTask) Start(ctx context.Context) { func (t *GenerateMarkersTask) generateSceneMarkers(ctx context.Context) { var sceneMarkers []*models.SceneMarker - if err := t.TxnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { + if err := t.TxnManager.WithTxn(ctx, func(ctx context.Context) error { var err error - sceneMarkers, err = r.SceneMarker().FindBySceneID(t.Scene.ID) + sceneMarkers, err = t.TxnManager.SceneMarker.FindBySceneID(ctx, t.Scene.ID) return err }); err != nil { logger.Errorf("error getting scene markers: %s", err.Error()) @@ -133,13 +133,9 @@ func (t *GenerateMarkersTask) generateMarker(videoFile *ffmpeg.VideoFile, scene func (t *GenerateMarkersTask) markersNeeded(ctx context.Context) int { markers := 0 - var sceneMarkers []*models.SceneMarker - if err := t.TxnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - var err error - sceneMarkers, err = r.SceneMarker().FindBySceneID(t.Scene.ID) - return err - }); err != nil { - logger.Errorf("errror finding scene markers: %s", err.Error()) + sceneMarkers, err := t.TxnManager.SceneMarker.FindBySceneID(ctx, t.Scene.ID) + if err != nil { + logger.Errorf("error finding scene markers: %s", err.Error()) return 0 } diff --git a/internal/manager/task_generate_phash.go b/internal/manager/task_generate_phash.go index 880bb7794..a986c96f1 100644 --- a/internal/manager/task_generate_phash.go +++ b/internal/manager/task_generate_phash.go @@ -2,23 +2,25 @@ package manager import ( "context" - "database/sql" "fmt" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/hash/videophash" "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/txn" ) type GeneratePhashTask struct { - Scene models.Scene + File *file.VideoFile Overwrite bool fileNamingAlgorithm models.HashAlgorithm - txnManager models.TransactionManager + txnManager txn.Manager + fileUpdater file.Updater } func (t *GeneratePhashTask) GetDescription() string { - return fmt.Sprintf("Generating phash for %s", t.Scene.Path) + return fmt.Sprintf("Generating phash for %s", t.File.Path) } func (t *GeneratePhashTask) Start(ctx context.Context) { @@ -26,34 +28,27 @@ func (t *GeneratePhashTask) Start(ctx context.Context) { return } - ffprobe := instance.FFProbe - videoFile, err := ffprobe.NewVideoFile(t.Scene.Path) - if err != nil { - logger.Errorf("error reading video file: %s", err.Error()) - return - } - - hash, err := videophash.Generate(instance.FFMPEG, videoFile) + hash, err := videophash.Generate(instance.FFMPEG, t.File) if err != nil { logger.Errorf("error generating phash: %s", err.Error()) logErrorOutput(err) return } - if err := t.txnManager.WithTxn(ctx, func(r models.Repository) error { - qb := r.Scene() - hashValue := sql.NullInt64{Int64: int64(*hash), Valid: true} - scenePartial := models.ScenePartial{ - ID: t.Scene.ID, - Phash: 
&hashValue, - } - _, err := qb.Update(scenePartial) - return err + if err := txn.WithTxn(ctx, t.txnManager, func(ctx context.Context) error { + qb := t.fileUpdater + hashValue := int64(*hash) + t.File.Fingerprints = t.File.Fingerprints.AppendUnique(file.Fingerprint{ + Type: file.FingerprintTypePhash, + Fingerprint: hashValue, + }) + + return qb.Update(ctx, t.File) }); err != nil { logger.Error(err.Error()) } } func (t *GeneratePhashTask) shouldGenerate() bool { - return t.Overwrite || !t.Scene.Phash.Valid + return t.Overwrite || t.File.Fingerprints.Get(file.FingerprintTypePhash) == nil } diff --git a/internal/manager/task_generate_screenshot.go b/internal/manager/task_generate_screenshot.go index 80ef9e40d..3d7e528df 100644 --- a/internal/manager/task_generate_screenshot.go +++ b/internal/manager/task_generate_screenshot.go @@ -5,7 +5,6 @@ import ( "fmt" "io" "os" - "time" "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" @@ -17,22 +16,17 @@ type GenerateScreenshotTask struct { Scene models.Scene ScreenshotAt *float64 fileNamingAlgorithm models.HashAlgorithm - txnManager models.TransactionManager + txnManager Repository } func (t *GenerateScreenshotTask) Start(ctx context.Context) { scenePath := t.Scene.Path - ffprobe := instance.FFProbe - probeResult, err := ffprobe.NewVideoFile(scenePath) - if err != nil { - logger.Error(err.Error()) - return - } + videoFile := t.Scene.Files.Primary() var at float64 if t.ScreenshotAt == nil { - at = float64(probeResult.Duration) * 0.2 + at = float64(videoFile.Duration) * 0.2 } else { at = *t.ScreenshotAt } @@ -53,7 +47,7 @@ func (t *GenerateScreenshotTask) Start(ctx context.Context) { Overwrite: true, } - if err := g.Screenshot(context.TODO(), probeResult.Path, checksum, probeResult.Width, probeResult.Duration, generate.ScreenshotOptions{ + if err := g.Screenshot(context.TODO(), videoFile.Path, checksum, videoFile.Width, videoFile.Duration, generate.ScreenshotOptions{ At: &at, }); err != nil { logger.Errorf("Error generating screenshot: %v", err) @@ -74,25 +68,21 @@ func (t *GenerateScreenshotTask) Start(ctx context.Context) { return } - if err := t.txnManager.WithTxn(ctx, func(r models.Repository) error { - qb := r.Scene() - updatedTime := time.Now() - updatedScene := models.ScenePartial{ - ID: t.Scene.ID, - UpdatedAt: &models.SQLiteTimestamp{Timestamp: updatedTime}, - } + if err := t.txnManager.WithTxn(ctx, func(ctx context.Context) error { + qb := t.txnManager.Scene + updatedScene := models.NewScenePartial() if err := scene.SetScreenshot(instance.Paths, checksum, coverImageData); err != nil { return fmt.Errorf("error writing screenshot: %v", err) } // update the scene cover table - if err := qb.UpdateCover(t.Scene.ID, coverImageData); err != nil { + if err := qb.UpdateCover(ctx, t.Scene.ID, coverImageData); err != nil { return fmt.Errorf("error setting screenshot: %v", err) } // update the scene with the update date - _, err = qb.Update(updatedScene) + _, err = qb.UpdatePartial(ctx, t.Scene.ID, updatedScene) if err != nil { return fmt.Errorf("error updating scene: %v", err) } diff --git a/internal/manager/task_generate_sprite.go b/internal/manager/task_generate_sprite.go index d7cde2c44..eb96d8f4c 100644 --- a/internal/manager/task_generate_sprite.go +++ b/internal/manager/task_generate_sprite.go @@ -51,6 +51,9 @@ func (t *GenerateSpriteTask) Start(ctx context.Context) { // required returns true if the sprite needs to be generated func (t GenerateSpriteTask) required() bool { + if t.Scene.Path == "" { + return false + } 
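A note on the transaction pattern these hunks converge on: the old API handed the callback a repository value (WithReadTxn(ctx, func(r models.ReaderRepository) error)), while the new API threads the transaction through the context and exposes stores as plain fields (repo.Scene rather than r.Scene()), with every store call taking ctx. A minimal sketch of that shape, using stand-in types since the real Repository and store interfaces are not shown in this diff:

    package main

    import (
        "context"
        "fmt"
    )

    // SceneStore stands in for the repository's Scene field; the real
    // interface lives elsewhere in the codebase.
    type SceneStore struct{}

    func (SceneStore) Find(ctx context.Context, id int) (string, error) {
        return fmt.Sprintf("scene-%d", id), nil
    }

    // Repository exposes stores as fields rather than getter methods.
    type Repository struct {
        Scene SceneStore
    }

    // WithTxn runs fn inside a transaction; the transaction travels in the
    // context, so fn receives a context instead of a repository value.
    func (r Repository) WithTxn(ctx context.Context, fn func(ctx context.Context) error) error {
        // a real implementation would begin/commit/rollback here
        return fn(ctx)
    }

    func main() {
        repo := Repository{}
        _ = repo.WithTxn(context.Background(), func(ctx context.Context) error {
            s, err := repo.Scene.Find(ctx, 1)
            if err != nil {
                return err
            }
            fmt.Println(s)
            return nil
        })
    }

Carrying the transaction in the context is what lets helpers such as txn.WithTxn(ctx, t.txnManager, ...) above compose without passing reader/writer pairs through every function signature.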
sceneHash := t.Scene.GetHash(t.fileNamingAlgorithm) return !t.doesSpriteExist(sceneHash) } diff --git a/internal/manager/task_identify.go b/internal/manager/task_identify.go index 678d0c7b3..078e541ee 100644 --- a/internal/manager/task_identify.go +++ b/internal/manager/task_identify.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "strings" "github.com/stashapp/stash/internal/identify" "github.com/stashapp/stash/pkg/job" @@ -13,22 +14,21 @@ import ( "github.com/stashapp/stash/pkg/scraper" "github.com/stashapp/stash/pkg/scraper/stashbox" "github.com/stashapp/stash/pkg/sliceutil/stringslice" + "github.com/stashapp/stash/pkg/txn" ) var ErrInput = errors.New("invalid request input") type IdentifyJob struct { - txnManager models.TransactionManager postHookExecutor identify.SceneUpdatePostHookExecutor - input models.IdentifyMetadataInput + input identify.Options - stashBoxes models.StashBoxes + stashBoxes []*models.StashBox progress *job.Progress } -func CreateIdentifyJob(input models.IdentifyMetadataInput) *IdentifyJob { +func CreateIdentifyJob(input identify.Options) *IdentifyJob { return &IdentifyJob{ - txnManager: instance.TxnManager, postHookExecutor: instance.PluginCache, input: input, stashBoxes: instance.Config.GetStashBoxes(), @@ -51,9 +51,10 @@ func (j *IdentifyJob) Execute(ctx context.Context, progress *job.Progress) { // if scene ids provided, use those // otherwise, batch query for all scenes - ordering by path - if err := j.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { + // don't use a transaction to query scenes + if err := txn.WithDatabase(ctx, instance.Repository, func(ctx context.Context) error { if len(j.input.SceneIDs) == 0 { - return j.identifyAllScenes(ctx, r, sources) + return j.identifyAllScenes(ctx, sources) } sceneIDs, err := stringslice.StringSliceToIntSlice(j.input.SceneIDs) @@ -69,7 +70,7 @@ func (j *IdentifyJob) Execute(ctx context.Context, progress *job.Progress) { // find the scene var err error - scene, err := r.Scene().Find(id) + scene, err := instance.Repository.Scene.Find(ctx, id) if err != nil { return fmt.Errorf("error finding scene with id %d: %w", id, err) } @@ -87,7 +88,7 @@ func (j *IdentifyJob) Execute(ctx context.Context, progress *job.Progress) { } } -func (j *IdentifyJob) identifyAllScenes(ctx context.Context, r models.ReaderRepository, sources []identify.ScraperSource) error { +func (j *IdentifyJob) identifyAllScenes(ctx context.Context, sources []identify.ScraperSource) error { // exclude organised organised := false sceneFilter := scene.FilterFromPaths(j.input.Paths) @@ -101,7 +102,7 @@ func (j *IdentifyJob) identifyAllScenes(ctx context.Context, r models.ReaderRepo // get the count pp := 0 findFilter.PerPage = &pp - countResult, err := r.Scene().Query(models.SceneQueryOptions{ + countResult, err := instance.Repository.Scene.Query(ctx, models.SceneQueryOptions{ QueryOptions: models.QueryOptions{ FindFilter: findFilter, Count: true, @@ -114,7 +115,7 @@ func (j *IdentifyJob) identifyAllScenes(ctx context.Context, r models.ReaderRepo j.progress.SetTotal(countResult.Count) - return scene.BatchProcess(ctx, r.Scene(), sceneFilter, findFilter, func(scene *models.Scene) error { + return scene.BatchProcess(ctx, instance.Repository.Scene, sceneFilter, findFilter, func(scene *models.Scene) error { if job.IsCancelled(ctx) { return nil } @@ -132,16 +133,21 @@ func (j *IdentifyJob) identifyScene(ctx context.Context, s *models.Scene, source var taskError error j.progress.ExecuteTask("Identifying "+s.Path, func() { task := 
identify.SceneIdentifier{ + SceneReaderUpdater: instance.Repository.Scene, + StudioCreator: instance.Repository.Studio, + PerformerCreator: instance.Repository.Performer, + TagCreator: instance.Repository.Tag, + DefaultOptions: j.input.Options, Sources: sources, - ScreenshotSetter: &scene.PathsScreenshotSetter{ + ScreenshotSetter: &scene.PathsCoverSetter{ Paths: instance.Paths, FileNamingAlgorithm: instance.Config.GetVideoFileNamingAlgorithm(), }, SceneUpdatePostHookExecutor: j.postHookExecutor, } - taskError = task.Identify(ctx, j.txnManager, s) + taskError = task.Identify(ctx, instance.Repository, s) }) if taskError != nil { @@ -165,7 +171,12 @@ func (j *IdentifyJob) getSources() ([]identify.ScraperSource, error) { src = identify.ScraperSource{ Name: "stash-box: " + stashBox.Endpoint, Scraper: stashboxSource{ - stashbox.NewClient(*stashBox, j.txnManager), + stashbox.NewClient(*stashBox, instance.Repository, stashbox.Repository{ + Scene: instance.Repository.Scene, + Performer: instance.Repository.Performer, + Tag: instance.Repository.Tag, + Studio: instance.Repository.Studio, + }), stashBox.Endpoint, }, RemoteSite: stashBox.Endpoint, @@ -192,7 +203,7 @@ func (j *IdentifyJob) getSources() ([]identify.ScraperSource, error) { return ret, nil } -func (j *IdentifyJob) getStashBox(src *models.ScraperSourceInput) (*models.StashBox, error) { +func (j *IdentifyJob) getStashBox(src *scraper.Source) (*models.StashBox, error) { if src.ScraperID != nil { return nil, nil } @@ -202,7 +213,38 @@ func (j *IdentifyJob) getStashBox(src *models.ScraperSourceInput) (*models.Stash return nil, fmt.Errorf("%w: stash_box_index or stash_box_endpoint or scraper_id must be set", ErrInput) } - return j.stashBoxes.ResolveStashBox(*src) + return resolveStashBox(j.stashBoxes, *src) +} + +func resolveStashBox(sb []*models.StashBox, source scraper.Source) (*models.StashBox, error) { + if source.StashBoxIndex != nil { + index := source.StashBoxIndex + if *index < 0 || *index >= len(sb) { + return nil, fmt.Errorf("%w: invalid stash_box_index: %d", models.ErrScraperSource, *index) + } + + return sb[*index], nil + } + + if source.StashBoxEndpoint != nil { + var ret *models.StashBox + endpoint := *source.StashBoxEndpoint + for _, b := range sb { + if strings.EqualFold(endpoint, b.Endpoint) { + ret = b + } + } + + if ret == nil { + return nil, fmt.Errorf(`%w: stash-box with endpoint "%s"`, models.ErrNotFound, endpoint) + } + + return ret, nil + } + + // neither stash-box input was provided, so assume it is a scraper + + return nil, nil } type stashboxSource struct { @@ -210,7 +252,7 @@ type stashboxSource struct { endpoint string } -func (s stashboxSource) ScrapeScene(ctx context.Context, sceneID int) (*models.ScrapedScene, error) { +func (s stashboxSource) ScrapeScene(ctx context.Context, sceneID int) (*scraper.ScrapedScene, error) { results, err := s.FindStashBoxSceneByFingerprints(ctx, sceneID) if err != nil { return nil, fmt.Errorf("error querying stash-box using scene ID %d: %w", sceneID, err) } @@ -232,8 +274,8 @@ type scraperSource struct { scraperID string } -func (s scraperSource) ScrapeScene(ctx context.Context, sceneID int) (*models.ScrapedScene, error) { - content, err := s.cache.ScrapeID(ctx, s.scraperID, sceneID, models.ScrapeContentTypeScene) +func (s scraperSource) ScrapeScene(ctx context.Context, sceneID int) (*scraper.ScrapedScene, error) { + content, err := s.cache.ScrapeID(ctx, s.scraperID, sceneID, scraper.ScrapeContentTypeScene) if err != nil { return nil, err } @@ -243,7 +285,7 @@ func (s scraperSource)
ScrapeScene(ctx context.Context, sceneID int) (*models.Sc return nil, nil } - if scene, ok := content.(models.ScrapedScene); ok { + if scene, ok := content.(scraper.ScrapedScene); ok { return &scene, nil } diff --git a/internal/manager/task_import.go b/internal/manager/task_import.go index ce6e22366..bd887b2e1 100644 --- a/internal/manager/task_import.go +++ b/internal/manager/task_import.go @@ -11,8 +11,7 @@ import ( "path/filepath" "time" - "github.com/stashapp/stash/internal/manager/config" - "github.com/stashapp/stash/pkg/database" + "github.com/99designs/gqlgen/graphql" "github.com/stashapp/stash/pkg/fsutil" "github.com/stashapp/stash/pkg/gallery" "github.com/stashapp/stash/pkg/image" @@ -29,21 +28,26 @@ import ( ) type ImportTask struct { - txnManager models.TransactionManager + txnManager Repository json jsonUtils BaseDir string TmpZip string Reset bool - DuplicateBehaviour models.ImportDuplicateEnum + DuplicateBehaviour ImportDuplicateEnum MissingRefBehaviour models.ImportMissingRefEnum - mappings *jsonschema.Mappings scraped []jsonschema.ScrapedItem fileNamingAlgorithm models.HashAlgorithm } -func CreateImportTask(a models.HashAlgorithm, input models.ImportObjectsInput) (*ImportTask, error) { +type ImportObjectsInput struct { + File graphql.Upload `json:"file"` + DuplicateBehaviour ImportDuplicateEnum `json:"duplicateBehaviour"` + MissingRefBehaviour models.ImportMissingRefEnum `json:"missingRefBehaviour"` +} + +func CreateImportTask(a models.HashAlgorithm, input ImportObjectsInput) (*ImportTask, error) { baseDir, err := instance.Paths.Generated.TempDir("import") if err != nil { logger.Errorf("error creating temporary directory for import: %s", err.Error()) @@ -66,7 +70,7 @@ func CreateImportTask(a models.HashAlgorithm, input models.ImportObjectsInput) ( } return &ImportTask{ - txnManager: GetInstance().TxnManager, + txnManager: GetInstance().Repository, BaseDir: baseDir, TmpZip: tmpZip, Reset: false, @@ -101,17 +105,12 @@ func (t *ImportTask) Start(ctx context.Context) { // set default behaviour if not provided if !t.DuplicateBehaviour.IsValid() { - t.DuplicateBehaviour = models.ImportDuplicateEnumFail + t.DuplicateBehaviour = ImportDuplicateEnumFail } if !t.MissingRefBehaviour.IsValid() { t.MissingRefBehaviour = models.ImportMissingRefEnumFail } - t.mappings, _ = t.json.getMappings() - if t.mappings == nil { - logger.Error("missing mappings json") - return - } scraped, _ := t.json.getScraped() if scraped == nil { logger.Warn("missing scraped json") @@ -119,7 +118,7 @@ func (t *ImportTask) Start(ctx context.Context) { t.scraped = scraped if t.Reset { - err := database.Reset(config.GetInstance().GetDatabasePath()) + err := t.txnManager.Reset() if err != nil { logger.Errorf("Error resetting database: %s", err.Error()) @@ -131,6 +130,7 @@ func (t *ImportTask) Start(ctx context.Context) { t.ImportPerformers(ctx) t.ImportStudios(ctx) t.ImportMovies(ctx) + t.ImportFiles(ctx) t.ImportGalleries(ctx) t.ImportScrapedItems(ctx) @@ -194,27 +194,38 @@ func (t *ImportTask) unzipFile() error { func (t *ImportTask) ImportPerformers(ctx context.Context) { logger.Info("[performers] importing") - for i, mappingJSON := range t.mappings.Performers { + path := t.json.json.Performers + files, err := os.ReadDir(path) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + logger.Errorf("[performers] failed to read performers directory: %v", err) + } + + return + } + + for i, fi := range files { index := i + 1 - performerJSON, err := t.json.getPerformer(mappingJSON.Checksum) + performerJSON, err := 
jsonschema.LoadPerformerFile(filepath.Join(path, fi.Name())) if err != nil { logger.Errorf("[performers] failed to read json: %s", err.Error()) continue } - logger.Progressf("[performers] %d of %d", index, len(t.mappings.Performers)) + logger.Progressf("[performers] %d of %d", index, len(files)) - if err := t.txnManager.WithTxn(ctx, func(r models.Repository) error { - readerWriter := r.Performer() + if err := t.txnManager.WithTxn(ctx, func(ctx context.Context) error { + r := t.txnManager + readerWriter := r.Performer importer := &performer.Importer{ ReaderWriter: readerWriter, - TagWriter: r.Tag(), + TagWriter: r.Tag, Input: *performerJSON, } - return performImport(importer, t.DuplicateBehaviour) + return performImport(ctx, importer, t.DuplicateBehaviour) }); err != nil { - logger.Errorf("[performers] <%s> import failed: %s", mappingJSON.Checksum, err.Error()) + logger.Errorf("[performers] <%s> import failed: %s", fi.Name(), err.Error()) } } @@ -226,18 +237,28 @@ func (t *ImportTask) ImportStudios(ctx context.Context) { logger.Info("[studios] importing") - for i, mappingJSON := range t.mappings.Studios { + path := t.json.json.Studios + files, err := os.ReadDir(path) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + logger.Errorf("[studios] failed to read studios directory: %v", err) + } + + return + } + + for i, fi := range files { index := i + 1 - studioJSON, err := t.json.getStudio(mappingJSON.Checksum) + studioJSON, err := jsonschema.LoadStudioFile(filepath.Join(path, fi.Name())) if err != nil { logger.Errorf("[studios] failed to read json: %s", err.Error()) continue } - logger.Progressf("[studios] %d of %d", index, len(t.mappings.Studios)) + logger.Progressf("[studios] %d of %d", index, len(files)) - if err := t.txnManager.WithTxn(ctx, func(r models.Repository) error { - return t.ImportStudio(studioJSON, pendingParent, r.Studio()) + if err := t.txnManager.WithTxn(ctx, func(ctx context.Context) error { + return t.ImportStudio(ctx, studioJSON, pendingParent, t.txnManager.Studio) }); err != nil { if errors.Is(err, studio.ErrParentStudioNotExist) { // add to the pending parent list so that it is created after the parent @@ -247,7 +268,7 @@ func (t *ImportTask) ImportStudios(ctx context.Context) { continue } - logger.Errorf("[studios] <%s> failed to create: %s", mappingJSON.Checksum, err.Error()) + logger.Errorf("[studios] <%s> failed to create: %s", fi.Name(), err.Error()) continue } } @@ -258,8 +279,8 @@ func (t *ImportTask) ImportStudios(ctx context.Context) { for _, s := range pendingParent { for _, orphanStudioJSON := range s { - if err := t.txnManager.WithTxn(ctx, func(r models.Repository) error { - return t.ImportStudio(orphanStudioJSON, nil, r.Studio()) + if err := t.txnManager.WithTxn(ctx, func(ctx context.Context) error { + return t.ImportStudio(ctx, orphanStudioJSON, nil, t.txnManager.Studio) }); err != nil { logger.Errorf("[studios] <%s> failed to create: %s", orphanStudioJSON.Name, err.Error()) continue @@ -271,7 +292,7 @@ func (t *ImportTask) ImportStudios(ctx context.Context) { logger.Info("[studios] import complete") } -func (t *ImportTask) ImportStudio(studioJSON *jsonschema.Studio, pendingParent map[string][]*jsonschema.Studio, readerWriter models.StudioReaderWriter) error { +func (t *ImportTask) ImportStudio(ctx context.Context, studioJSON *jsonschema.Studio, pendingParent map[string][]*jsonschema.Studio, readerWriter studio.NameFinderCreatorUpdater) error { importer := &studio.Importer{ ReaderWriter: readerWriter, Input: *studioJSON, @@ -283,7 +304,7 @@ func (t 
*ImportTask) ImportStudio(studioJSON *jsonschema.Studio, pendingParent m importer.MissingRefBehaviour = models.ImportMissingRefEnumFail } - if err := performImport(importer, t.DuplicateBehaviour); err != nil { + if err := performImport(ctx, importer, t.DuplicateBehaviour); err != nil { return err } @@ -291,7 +312,7 @@ func (t *ImportTask) ImportStudio(studioJSON *jsonschema.Studio, pendingParent m s := pendingParent[studioJSON.Name] for _, childStudioJSON := range s { // map is nil since we're not checking parent studios at this point - if err := t.ImportStudio(childStudioJSON, nil, readerWriter); err != nil { + if err := t.ImportStudio(ctx, childStudioJSON, nil, readerWriter); err != nil { return fmt.Errorf("failed to create child studio <%s>: %s", childStudioJSON.Name, err.Error()) } } @@ -305,19 +326,30 @@ func (t *ImportTask) ImportStudio(studioJSON *jsonschema.Studio, pendingParent m func (t *ImportTask) ImportMovies(ctx context.Context) { logger.Info("[movies] importing") - for i, mappingJSON := range t.mappings.Movies { + path := t.json.json.Movies + files, err := os.ReadDir(path) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + logger.Errorf("[movies] failed to read movies directory: %v", err) + } + + return + } + + for i, fi := range files { index := i + 1 - movieJSON, err := t.json.getMovie(mappingJSON.Checksum) + movieJSON, err := jsonschema.LoadMovieFile(filepath.Join(path, fi.Name())) if err != nil { logger.Errorf("[movies] failed to read json: %s", err.Error()) continue } - logger.Progressf("[movies] %d of %d", index, len(t.mappings.Movies)) + logger.Progressf("[movies] %d of %d", index, len(files)) - if err := t.txnManager.WithTxn(ctx, func(r models.Repository) error { - readerWriter := r.Movie() - studioReaderWriter := r.Studio() + if err := t.txnManager.WithTxn(ctx, func(ctx context.Context) error { + r := t.txnManager + readerWriter := r.Movie + studioReaderWriter := r.Studio movieImporter := &movie.Importer{ ReaderWriter: readerWriter, @@ -326,9 +358,9 @@ func (t *ImportTask) ImportMovies(ctx context.Context) { MissingRefBehaviour: t.MissingRefBehaviour, } - return performImport(movieImporter, t.DuplicateBehaviour) + return performImport(ctx, movieImporter, t.DuplicateBehaviour) }); err != nil { - logger.Errorf("[movies] <%s> import failed: %s", mappingJSON.Checksum, err.Error()) + logger.Errorf("[movies] <%s> import failed: %s", fi.Name(), err.Error()) continue } } @@ -336,27 +368,130 @@ func (t *ImportTask) ImportMovies(ctx context.Context) { logger.Info("[movies] import complete") } +func (t *ImportTask) ImportFiles(ctx context.Context) { + logger.Info("[files] importing") + + path := t.json.json.Files + files, err := os.ReadDir(path) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + logger.Errorf("[files] failed to read files directory: %v", err) + } + + return + } + + pendingParent := make(map[string][]jsonschema.DirEntry) + + for i, fi := range files { + index := i + 1 + fileJSON, err := jsonschema.LoadFileFile(filepath.Join(path, fi.Name())) + if err != nil { + logger.Errorf("[files] failed to read json: %s", err.Error()) + continue + } + + logger.Progressf("[files] %d of %d", index, len(files)) + + if err := t.txnManager.WithTxn(ctx, func(ctx context.Context) error { + return t.ImportFile(ctx, fileJSON, pendingParent) + }); err != nil { + if errors.Is(err, errZipFileNotExist) { + // add to the pending parent list so that it is created after the parent + s := pendingParent[fileJSON.DirEntry().ZipFile] + s = append(s, fileJSON) + 
pendingParent[fileJSON.DirEntry().ZipFile] = s + continue + } + + logger.Errorf("[files] <%s> failed to create: %s", fi.Name(), err.Error()) + continue + } + } + + // create the leftover files, warning for missing zip files + if len(pendingParent) > 0 { + logger.Warnf("[files] importing files with missing zip files") + + for _, s := range pendingParent { + for _, orphanFileJSON := range s { + if err := t.txnManager.WithTxn(ctx, func(ctx context.Context) error { + return t.ImportFile(ctx, orphanFileJSON, nil) + }); err != nil { + logger.Errorf("[files] <%s> failed to create: %s", orphanFileJSON.DirEntry().Path, err.Error()) + continue + } + } + } + } + + logger.Info("[files] import complete") +} + +func (t *ImportTask) ImportFile(ctx context.Context, fileJSON jsonschema.DirEntry, pendingParent map[string][]jsonschema.DirEntry) error { + r := t.txnManager + readerWriter := r.File + + fileImporter := &fileFolderImporter{ + ReaderWriter: readerWriter, + FolderStore: r.Folder, + Input: fileJSON, + } + + // ignore duplicate files - don't overwrite + if err := performImport(ctx, fileImporter, ImportDuplicateEnumIgnore); err != nil { + return err + } + + // now create the files pending this file's creation + s := pendingParent[fileJSON.DirEntry().Path] + for _, childFileJSON := range s { + // map is nil since we're not checking parent zip files at this point + if err := t.ImportFile(ctx, childFileJSON, nil); err != nil { + return fmt.Errorf("failed to create child file <%s>: %s", childFileJSON.DirEntry().Path, err.Error()) + } + } + + // delete the entry from the map so that we know it's not left over + delete(pendingParent, fileJSON.DirEntry().Path) + + return nil +} + func (t *ImportTask) ImportGalleries(ctx context.Context) { logger.Info("[galleries] importing") - for i, mappingJSON := range t.mappings.Galleries { + path := t.json.json.Galleries + files, err := os.ReadDir(path) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + logger.Errorf("[galleries] failed to read galleries directory: %v", err) + } + + return + } + + for i, fi := range files { index := i + 1 - galleryJSON, err := t.json.getGallery(mappingJSON.Checksum) + galleryJSON, err := jsonschema.LoadGalleryFile(filepath.Join(path, fi.Name())) if err != nil { logger.Errorf("[galleries] failed to read json: %s", err.Error()) continue } - logger.Progressf("[galleries] %d of %d", index, len(t.mappings.Galleries)) + logger.Progressf("[galleries] %d of %d", index, len(files)) - if err := t.txnManager.WithTxn(ctx, func(r models.Repository) error { - readerWriter := r.Gallery() - tagWriter := r.Tag() - performerWriter := r.Performer() - studioWriter := r.Studio() + if err := t.txnManager.WithTxn(ctx, func(ctx context.Context) error { + r := t.txnManager + readerWriter := r.Gallery + tagWriter := r.Tag + performerWriter := r.Performer + studioWriter := r.Studio galleryImporter := &gallery.Importer{ ReaderWriter: readerWriter, + FolderFinder: r.Folder, + FileFinder: r.File, PerformerWriter: performerWriter, StudioWriter: studioWriter, TagWriter: tagWriter, @@ -364,9 +499,9 @@ func (t *ImportTask) ImportGalleries(ctx context.Context) { MissingRefBehaviour: t.MissingRefBehaviour, } - return performImport(galleryImporter, t.DuplicateBehaviour) + return performImport(ctx, galleryImporter, t.DuplicateBehaviour) }); err != nil { - logger.Errorf("[galleries] <%s> import failed to commit: %s", mappingJSON.Checksum, err.Error()) + logger.Errorf("[galleries] <%s> import failed to commit: %s", fi.Name(), err.Error()) continue } } @@ -378,18 +513,28
@@ func (t *ImportTask) ImportTags(ctx context.Context) { pendingParent := make(map[string][]*jsonschema.Tag) logger.Info("[tags] importing") - for i, mappingJSON := range t.mappings.Tags { + path := t.json.json.Tags + files, err := os.ReadDir(path) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + logger.Errorf("[tags] failed to read tags directory: %v", err) + } + + return + } + + for i, fi := range files { index := i + 1 - tagJSON, err := t.json.getTag(mappingJSON.Checksum) + tagJSON, err := jsonschema.LoadTagFile(filepath.Join(path, fi.Name())) if err != nil { logger.Errorf("[tags] failed to read json: %s", err.Error()) continue } - logger.Progressf("[tags] %d of %d", index, len(t.mappings.Tags)) + logger.Progressf("[tags] %d of %d", index, len(files)) - if err := t.txnManager.WithTxn(ctx, func(r models.Repository) error { - return t.ImportTag(tagJSON, pendingParent, false, r.Tag()) + if err := t.txnManager.WithTxn(ctx, func(ctx context.Context) error { + return t.ImportTag(ctx, tagJSON, pendingParent, false, t.txnManager.Tag) }); err != nil { var parentError tag.ParentTagNotExistError if errors.As(err, &parentError) { @@ -397,15 +542,15 @@ func (t *ImportTask) ImportTags(ctx context.Context) { continue } - logger.Errorf("[tags] <%s> failed to import: %s", mappingJSON.Checksum, err.Error()) + logger.Errorf("[tags] <%s> failed to import: %s", fi.Name(), err.Error()) continue } } for _, s := range pendingParent { for _, orphanTagJSON := range s { - if err := t.txnManager.WithTxn(ctx, func(r models.Repository) error { - return t.ImportTag(orphanTagJSON, nil, true, r.Tag()) + if err := t.txnManager.WithTxn(ctx, func(ctx context.Context) error { + return t.ImportTag(ctx, orphanTagJSON, nil, true, t.txnManager.Tag) }); err != nil { logger.Errorf("[tags] <%s> failed to create: %s", orphanTagJSON.Name, err.Error()) continue @@ -416,7 +561,7 @@ func (t *ImportTask) ImportTags(ctx context.Context) { logger.Info("[tags] import complete") } -func (t *ImportTask) ImportTag(tagJSON *jsonschema.Tag, pendingParent map[string][]*jsonschema.Tag, fail bool, readerWriter models.TagReaderWriter) error { +func (t *ImportTask) ImportTag(ctx context.Context, tagJSON *jsonschema.Tag, pendingParent map[string][]*jsonschema.Tag, fail bool, readerWriter tag.NameFinderCreatorUpdater) error { importer := &tag.Importer{ ReaderWriter: readerWriter, Input: *tagJSON, @@ -428,12 +573,12 @@ func (t *ImportTask) ImportTag(tagJSON *jsonschema.Tag, pendingParent map[string importer.MissingRefBehaviour = models.ImportMissingRefEnumFail } - if err := performImport(importer, t.DuplicateBehaviour); err != nil { + if err := performImport(ctx, importer, t.DuplicateBehaviour); err != nil { return err } for _, childTagJSON := range pendingParent[tagJSON.Name] { - if err := t.ImportTag(childTagJSON, pendingParent, fail, readerWriter); err != nil { + if err := t.ImportTag(ctx, childTagJSON, pendingParent, fail, readerWriter); err != nil { var parentError tag.ParentTagNotExistError if errors.As(err, &parentError) { pendingParent[parentError.MissingParent()] = append(pendingParent[parentError.MissingParent()], tagJSON) @@ -450,15 +595,16 @@ func (t *ImportTask) ImportTag(tagJSON *jsonschema.Tag, pendingParent map[string } func (t *ImportTask) ImportScrapedItems(ctx context.Context) { - if err := t.txnManager.WithTxn(ctx, func(r models.Repository) error { + if err := t.txnManager.WithTxn(ctx, func(ctx context.Context) error { logger.Info("[scraped sites] importing") - qb := r.ScrapedItem() - sqb := r.Studio() + r := 
t.txnManager + qb := r.ScrapedItem + sqb := r.Studio currentTime := time.Now() for i, mappingJSON := range t.scraped { index := i + 1 - logger.Progressf("[scraped sites] %d of %d", index, len(t.mappings.Scenes)) + logger.Progressf("[scraped sites] %d of %d", index, len(t.scraped)) newScrapedItem := models.ScrapedItem{ Title: sql.NullString{String: mappingJSON.Title, Valid: true}, @@ -477,7 +623,7 @@ func (t *ImportTask) ImportScrapedItems(ctx context.Context) { UpdatedAt: models.SQLiteTimestamp{Timestamp: t.getTimeFromJSONTime(mappingJSON.UpdatedAt)}, } - studio, err := sqb.FindByName(mappingJSON.Studio, false) + studio, err := sqb.FindByName(ctx, mappingJSON.Studio, false) if err != nil { logger.Errorf("[scraped sites] failed to fetch studio: %s", err.Error()) } @@ -485,7 +631,7 @@ func (t *ImportTask) ImportScrapedItems(ctx context.Context) { newScrapedItem.StudioID = sql.NullInt64{Int64: int64(studio.ID), Valid: true} } - _, err = qb.Create(newScrapedItem) + _, err = qb.Create(ctx, newScrapedItem) if err != nil { logger.Errorf("[scraped sites] <%s> failed to create: %s", newScrapedItem.Title.String, err.Error()) } @@ -502,44 +648,53 @@ func (t *ImportTask) ImportScrapedItems(ctx context.Context) { func (t *ImportTask) ImportScenes(ctx context.Context) { logger.Info("[scenes] importing") - for i, mappingJSON := range t.mappings.Scenes { + path := t.json.json.Scenes + files, err := os.ReadDir(path) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + logger.Errorf("[scenes] failed to read scenes directory: %v", err) + } + + return + } + + for i, fi := range files { index := i + 1 - logger.Progressf("[scenes] %d of %d", index, len(t.mappings.Scenes)) + logger.Progressf("[scenes] %d of %d", index, len(files)) - sceneJSON, err := t.json.getScene(mappingJSON.Checksum) + sceneJSON, err := jsonschema.LoadSceneFile(filepath.Join(path, fi.Name())) if err != nil { - logger.Infof("[scenes] <%s> json parse failure: %s", mappingJSON.Checksum, err.Error()) + logger.Infof("[scenes] <%s> json parse failure: %s", fi.Name(), err.Error()) continue } - sceneHash := mappingJSON.Checksum - - if err := t.txnManager.WithTxn(ctx, func(r models.Repository) error { - readerWriter := r.Scene() - tagWriter := r.Tag() - galleryWriter := r.Gallery() - movieWriter := r.Movie() - performerWriter := r.Performer() - studioWriter := r.Studio() - markerWriter := r.SceneMarker() + if err := t.txnManager.WithTxn(ctx, func(ctx context.Context) error { + r := t.txnManager + readerWriter := r.Scene + tagWriter := r.Tag + galleryWriter := r.Gallery + movieWriter := r.Movie + performerWriter := r.Performer + studioWriter := r.Studio + markerWriter := r.SceneMarker sceneImporter := &scene.Importer{ ReaderWriter: readerWriter, Input: *sceneJSON, - Path: mappingJSON.Path, + FileFinder: r.File, FileNamingAlgorithm: t.fileNamingAlgorithm, MissingRefBehaviour: t.MissingRefBehaviour, - GalleryWriter: galleryWriter, + GalleryFinder: galleryWriter, MovieWriter: movieWriter, PerformerWriter: performerWriter, StudioWriter: studioWriter, TagWriter: tagWriter, } - if err := performImport(sceneImporter, t.DuplicateBehaviour); err != nil { + if err := performImport(ctx, sceneImporter, t.DuplicateBehaviour); err != nil { return err } @@ -553,14 +708,14 @@ func (t *ImportTask) ImportScenes(ctx context.Context) { TagWriter: tagWriter, } - if err := performImport(markerImporter, t.DuplicateBehaviour); err != nil { + if err := performImport(ctx, markerImporter, t.DuplicateBehaviour); err != nil { return err } } return nil }); err != nil { - 
logger.Errorf("[scenes] <%s> import failed: %s", sceneHash, err.Error()) + logger.Errorf("[scenes] <%s> import failed: %s", fi.Name(), err.Error()) } } @@ -570,42 +725,51 @@ func (t *ImportTask) ImportScenes(ctx context.Context) { func (t *ImportTask) ImportImages(ctx context.Context) { logger.Info("[images] importing") - for i, mappingJSON := range t.mappings.Images { + path := t.json.json.Images + files, err := os.ReadDir(path) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + logger.Errorf("[images] failed to read images directory: %v", err) + } + + return + } + + for i, fi := range files { index := i + 1 - logger.Progressf("[images] %d of %d", index, len(t.mappings.Images)) + logger.Progressf("[images] %d of %d", index, len(files)) - imageJSON, err := t.json.getImage(mappingJSON.Checksum) + imageJSON, err := jsonschema.LoadImageFile(filepath.Join(path, fi.Name())) if err != nil { - logger.Infof("[images] <%s> json parse failure: %s", mappingJSON.Checksum, err.Error()) + logger.Infof("[images] <%s> json parse failure: %s", fi.Name(), err.Error()) continue } - imageHash := mappingJSON.Checksum - - if err := t.txnManager.WithTxn(ctx, func(r models.Repository) error { - readerWriter := r.Image() - tagWriter := r.Tag() - galleryWriter := r.Gallery() - performerWriter := r.Performer() - studioWriter := r.Studio() + if err := t.txnManager.WithTxn(ctx, func(ctx context.Context) error { + r := t.txnManager + readerWriter := r.Image + tagWriter := r.Tag + galleryWriter := r.Gallery + performerWriter := r.Performer + studioWriter := r.Studio imageImporter := &image.Importer{ ReaderWriter: readerWriter, + FileFinder: r.File, Input: *imageJSON, - Path: mappingJSON.Path, MissingRefBehaviour: t.MissingRefBehaviour, - GalleryWriter: galleryWriter, + GalleryFinder: galleryWriter, PerformerWriter: performerWriter, StudioWriter: studioWriter, TagWriter: tagWriter, } - return performImport(imageImporter, t.DuplicateBehaviour) + return performImport(ctx, imageImporter, t.DuplicateBehaviour) }); err != nil { - logger.Errorf("[images] <%s> import failed: %s", imageHash, err.Error()) + logger.Errorf("[images] <%s> import failed: %s", fi.Name(), err.Error()) } } diff --git a/internal/manager/task_migrate_hash.go b/internal/manager/task_migrate_hash.go index e0c7c1131..902cbd69a 100644 --- a/internal/manager/task_migrate_hash.go +++ b/internal/manager/task_migrate_hash.go @@ -14,13 +14,13 @@ type MigrateHashTask struct { // Start starts the task. 
func (t *MigrateHashTask) Start() { - if !t.Scene.OSHash.Valid || !t.Scene.Checksum.Valid { + if t.Scene.OSHash == "" || t.Scene.Checksum == "" { // nothing to do return } - oshash := t.Scene.OSHash.String - checksum := t.Scene.Checksum.String + oshash := t.Scene.OSHash + checksum := t.Scene.Checksum oldHash := oshash newHash := checksum diff --git a/internal/manager/task_plugin.go b/internal/manager/task_plugin.go index 31023445d..78cd5db02 100644 --- a/internal/manager/task_plugin.go +++ b/internal/manager/task_plugin.go @@ -6,10 +6,10 @@ import ( "github.com/stashapp/stash/pkg/job" "github.com/stashapp/stash/pkg/logger" - "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/plugin" ) -func (s *Manager) RunPluginTask(ctx context.Context, pluginID string, taskName string, args []*models.PluginArgInput) int { +func (s *Manager) RunPluginTask(ctx context.Context, pluginID string, taskName string, args []*plugin.PluginArgInput) int { j := job.MakeJobExec(func(jobCtx context.Context, progress *job.Progress) { pluginProgress := make(chan float64) task, err := s.PluginCache.CreateTask(ctx, pluginID, taskName, args, pluginProgress) diff --git a/internal/manager/task_scan.go b/internal/manager/task_scan.go index 05ffe168e..55ee9f614 100644 --- a/internal/manager/task_scan.go +++ b/internal/manager/task_scan.go @@ -4,326 +4,357 @@ import ( "context" "errors" "fmt" - "os" + "io/fs" "path/filepath" + "regexp" "time" - "github.com/remeh/sizedwaitgroup" - "github.com/stashapp/stash/internal/manager/config" "github.com/stashapp/stash/pkg/file" + "github.com/stashapp/stash/pkg/file/video" "github.com/stashapp/stash/pkg/fsutil" + "github.com/stashapp/stash/pkg/gallery" + "github.com/stashapp/stash/pkg/image" "github.com/stashapp/stash/pkg/job" "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/scene" "github.com/stashapp/stash/pkg/scene/generate" - "github.com/stashapp/stash/pkg/utils" ) -const scanQueueSize = 200000 - -type ScanJob struct { - txnManager models.TransactionManager - input models.ScanMetadataInput - subscriptions *subscriptionManager +type scanner interface { + Scan(ctx context.Context, handlers []file.Handler, options file.ScanOptions, progressReporter file.ProgressReporter) } -type scanFile struct { - path string - info os.FileInfo - caseSensitiveFs bool +type ScanJob struct { + scanner scanner + input ScanMetadataInput + subscriptions *subscriptionManager } func (j *ScanJob) Execute(ctx context.Context, progress *job.Progress) { input := j.input - paths := getScanPaths(input.Paths) if job.IsCancelled(ctx) { logger.Info("Stopping due to user request") return } + sp := getScanPaths(input.Paths) + paths := make([]string, len(sp)) + for i, p := range sp { + paths[i] = p.Path + } + start := time.Now() - config := config.GetInstance() - parallelTasks := config.GetParallelTasksWithAutoDetection() - logger.Infof("Scan started with %d parallel tasks", parallelTasks) - - fileQueue := make(chan scanFile, scanQueueSize) - go func() { - total, newFiles := j.queueFiles(ctx, paths, fileQueue, parallelTasks) - - if !job.IsCancelled(ctx) { - progress.SetTotal(total) - logger.Infof("Finished counting files. 
Total files to scan: %d, %d new files found", total, newFiles) - } - }() - - wg := sizedwaitgroup.New(parallelTasks) - - fileNamingAlgo := config.GetVideoFileNamingAlgorithm() - calculateMD5 := config.IsCalculateMD5() - - var err error - - var galleries []string - - mutexManager := utils.NewMutexManager() - - for f := range fileQueue { - if job.IsCancelled(ctx) { - break - } - - if isGallery(f.path) { - galleries = append(galleries, f.path) - } - - if err := instance.Paths.Generated.EnsureTmpDir(); err != nil { - logger.Warnf("couldn't create temporary directory: %v", err) - } - - wg.Add() - task := ScanTask{ - TxnManager: j.txnManager, - file: file.FSFile(f.path, f.info), - UseFileMetadata: utils.IsTrue(input.UseFileMetadata), - StripFileExtension: utils.IsTrue(input.StripFileExtension), - fileNamingAlgorithm: fileNamingAlgo, - calculateMD5: calculateMD5, - GeneratePreview: utils.IsTrue(input.ScanGeneratePreviews), - GenerateImagePreview: utils.IsTrue(input.ScanGenerateImagePreviews), - GenerateSprite: utils.IsTrue(input.ScanGenerateSprites), - GeneratePhash: utils.IsTrue(input.ScanGeneratePhashes), - GenerateThumbnails: utils.IsTrue(input.ScanGenerateThumbnails), - progress: progress, - CaseSensitiveFs: f.caseSensitiveFs, - mutexManager: mutexManager, - } - - go func() { - task.Start(ctx) - wg.Done() - progress.Increment() - }() - } - - wg.Wait() - - if err := instance.Paths.Generated.EmptyTmpDir(); err != nil { - logger.Warnf("couldn't empty temporary directory: %v", err) - } - - elapsed := time.Since(start) - logger.Info(fmt.Sprintf("Scan finished (%s)", elapsed)) - - if job.IsCancelled(ctx) { - logger.Info("Stopping due to user request") - return - } - - if err != nil { - return - } - - progress.ExecuteTask("Associating galleries", func() { - for _, path := range galleries { - wg.Add() - task := ScanTask{ - TxnManager: j.txnManager, - file: file.FSFile(path, nil), // hopefully info is not needed - UseFileMetadata: false, - } - - go task.associateGallery(ctx, &wg) - wg.Wait() - } - logger.Info("Finished gallery association") - }) - - j.subscriptions.notify() -} - -func (j *ScanJob) queueFiles(ctx context.Context, paths []*models.StashConfig, scanQueue chan<- scanFile, parallelTasks int) (total int, newFiles int) { - defer close(scanQueue) + const taskQueueSize = 200000 + taskQueue := job.NewTaskQueue(ctx, progress, taskQueueSize, instance.Config.GetParallelTasksWithAutoDetection()) var minModTime time.Time if j.input.Filter != nil && j.input.Filter.MinModTime != nil { minModTime = *j.input.Filter.MinModTime } - wg := sizedwaitgroup.New(parallelTasks) + j.scanner.Scan(ctx, getScanHandlers(j.input, taskQueue, progress), file.ScanOptions{ + Paths: paths, + ScanFilters: []file.PathFilter{newScanFilter(instance.Config, minModTime)}, + ZipFileExtensions: instance.Config.GetGalleryExtensions(), + ParallelTasks: instance.Config.GetParallelTasksWithAutoDetection(), + HandlerRequiredFilters: []file.Filter{newHandlerRequiredFilter(instance.Config)}, + }, progress) - for _, sp := range paths { - csFs, er := fsutil.IsFsPathCaseSensitive(sp.Path) - if er != nil { - logger.Warnf("Cannot determine fs case sensitivity: %s", er.Error()) - } + taskQueue.Close() - err := walkFilesToScan(sp, func(path string, info os.FileInfo, err error) error { - // check stop - if job.IsCancelled(ctx) { - return context.Canceled - } - - // exit early on cutoff - if info.Mode().IsRegular() && info.ModTime().Before(minModTime) { - return nil - } - - wg.Add() - - go func() { - defer wg.Done() - - // #1756 - skip zero length 
files and directories - if info.IsDir() { - return - } - - if info.Size() == 0 { - logger.Infof("Skipping zero-length file: %s", path) - return - } - - total++ - if !j.doesPathExist(ctx, path) { - newFiles++ - } - - scanQueue <- scanFile{ - path: path, - info: info, - caseSensitiveFs: csFs, - } - }() - - return nil - }) - - wg.Wait() - - if err != nil && !errors.Is(err, context.Canceled) { - logger.Errorf("Error encountered queuing files to scan: %s", err.Error()) - return - } - } - - return -} - -func (j *ScanJob) doesPathExist(ctx context.Context, path string) bool { - config := config.GetInstance() - vidExt := config.GetVideoExtensions() - imgExt := config.GetImageExtensions() - gExt := config.GetGalleryExtensions() - - ret := false - txnErr := j.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - switch { - case fsutil.MatchExtension(path, gExt): - g, _ := r.Gallery().FindByPath(path) - if g != nil { - ret = true - } - case fsutil.MatchExtension(path, vidExt): - s, _ := r.Scene().FindByPath(path) - if s != nil { - ret = true - } - case fsutil.MatchExtension(path, imgExt): - i, _ := r.Image().FindByPath(path) - if i != nil { - ret = true - } - } - - return nil - }) - if txnErr != nil { - logger.Warnf("error checking if file exists in database: %v", txnErr) - } - - return ret -} - -type ScanTask struct { - TxnManager models.TransactionManager - file file.SourceFile - UseFileMetadata bool - StripFileExtension bool - calculateMD5 bool - fileNamingAlgorithm models.HashAlgorithm - GenerateSprite bool - GeneratePhash bool - GeneratePreview bool - GenerateImagePreview bool - GenerateThumbnails bool - zipGallery *models.Gallery - progress *job.Progress - CaseSensitiveFs bool - - mutexManager *utils.MutexManager -} - -func (t *ScanTask) Start(ctx context.Context) { - var s *models.Scene - path := t.file.Path() - t.progress.ExecuteTask("Scanning "+path, func() { - switch { - case isGallery(path): - t.scanGallery(ctx) - case isVideo(path): - s = t.scanScene(ctx) - case isImage(path): - t.scanImage(ctx) - case isCaptions(path): - t.associateCaptions(ctx) - } - }) - - if s == nil { + if job.IsCancelled(ctx) { + logger.Info("Stopping due to user request") return } - // Handle the case of a scene - iwg := sizedwaitgroup.New(2) + elapsed := time.Since(start) + logger.Info(fmt.Sprintf("Scan finished (%s)", elapsed)) - if t.GenerateSprite { - iwg.Add() + j.subscriptions.notify() +} - go t.progress.ExecuteTask(fmt.Sprintf("Generating sprites for %s", path), func() { +type extensionConfig struct { + vidExt []string + imgExt []string + zipExt []string +} + +func newExtensionConfig(c *config.Instance) extensionConfig { + return extensionConfig{ + vidExt: c.GetVideoExtensions(), + imgExt: c.GetImageExtensions(), + zipExt: c.GetGalleryExtensions(), + } +} + +type fileCounter interface { + CountByFileID(ctx context.Context, fileID file.ID) (int, error) +} + +// handlerRequiredFilter returns true if a File's handler needs to be executed despite the file not being updated. 
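Concretely: a file that already has related scene, image, or gallery rows is left alone, and the handler only runs when the count is zero. A toy sketch of that decision with a hypothetical in-memory counter (the real counters are the database stores wired up just below; file.ID is simplified to int here):

    package main

    import (
        "context"
        "fmt"
    )

    // fileCounter matches the small interface the filter depends on.
    type fileCounter interface {
        CountByFileID(ctx context.Context, fileID int) (int, error)
    }

    // mapCounter is a hypothetical in-memory stand-in for a store.
    type mapCounter map[int]int

    func (m mapCounter) CountByFileID(_ context.Context, fileID int) (int, error) {
        return m[fileID], nil
    }

    // handlerRequired mirrors the Accept logic: run the handler only when
    // the file has no related objects yet; errors are ignored, as in the diff.
    func handlerRequired(ctx context.Context, c fileCounter, fileID int) bool {
        n, err := c.CountByFileID(ctx, fileID)
        if err != nil {
            return false
        }
        return n == 0
    }

    func main() {
        scenes := mapCounter{1: 1} // file 1 already produced a scene
        fmt.Println(handlerRequired(context.Background(), scenes, 1)) // false
        fmt.Println(handlerRequired(context.Background(), scenes, 2)) // true
    }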
+type handlerRequiredFilter struct { + extensionConfig + SceneFinder fileCounter + ImageFinder fileCounter + GalleryFinder fileCounter +} + +func newHandlerRequiredFilter(c *config.Instance) *handlerRequiredFilter { + db := instance.Database + + return &handlerRequiredFilter{ + extensionConfig: newExtensionConfig(c), + SceneFinder: db.Scene, + ImageFinder: db.Image, + GalleryFinder: db.Gallery, + } +} + +func (f *handlerRequiredFilter) Accept(ctx context.Context, ff file.File) bool { + path := ff.Base().Path + isVideoFile := fsutil.MatchExtension(path, f.vidExt) + isImageFile := fsutil.MatchExtension(path, f.imgExt) + isZipFile := fsutil.MatchExtension(path, f.zipExt) + + var counter fileCounter + + switch { + case isVideoFile: + // return true if there are no scenes associated + counter = f.SceneFinder + case isImageFile: + counter = f.ImageFinder + case isZipFile: + counter = f.GalleryFinder + } + + if counter == nil { + return false + } + + n, err := counter.CountByFileID(ctx, ff.Base().ID) + if err != nil { + // just ignore + return false + } + + // execute handler if there are no related objects + return n == 0 +} + +type scanFilter struct { + extensionConfig + stashPaths []*config.StashConfig + generatedPath string + videoExcludeRegex []*regexp.Regexp + imageExcludeRegex []*regexp.Regexp + minModTime time.Time +} + +func newScanFilter(c *config.Instance, minModTime time.Time) *scanFilter { + return &scanFilter{ + extensionConfig: newExtensionConfig(c), + stashPaths: c.GetStashPaths(), + generatedPath: c.GetGeneratedPath(), + videoExcludeRegex: generateRegexps(c.GetExcludes()), + imageExcludeRegex: generateRegexps(c.GetImageExcludes()), + minModTime: minModTime, + } +} + +func (f *scanFilter) Accept(ctx context.Context, path string, info fs.FileInfo) bool { + if fsutil.IsPathInDir(f.generatedPath, path) { + return false + } + + // exit early on cutoff + if info.Mode().IsRegular() && info.ModTime().Before(f.minModTime) { + return false + } + + isVideoFile := fsutil.MatchExtension(path, f.vidExt) + isImageFile := fsutil.MatchExtension(path, f.imgExt) + isZipFile := fsutil.MatchExtension(path, f.zipExt) + + // handle caption files + if fsutil.MatchExtension(path, video.CaptionExts) { + // we don't include caption files in the file scan, but we do need + // to handle them + video.AssociateCaptions(ctx, path, instance.Repository, instance.Database.File, instance.Database.File) + + return false + } + + if !info.IsDir() && !isVideoFile && !isImageFile && !isZipFile { + return false + } + + // #1756 - skip zero length files + if !info.IsDir() && info.Size() == 0 { + logger.Infof("Skipping zero-length file: %s", path) + return false + } + + s := getStashFromDirPath(f.stashPaths, path) + + if s == nil { + return false + } + + // shortcut: skip the directory entirely if it matches both exclusion patterns + // add a trailing separator so that it correctly matches against patterns like path/.* + pathExcludeTest := path + string(filepath.Separator) + if (s.ExcludeVideo || matchFileRegex(pathExcludeTest, f.videoExcludeRegex)) && (s.ExcludeImage || matchFileRegex(pathExcludeTest, f.imageExcludeRegex)) { + return false + } + + if isVideoFile && (s.ExcludeVideo || matchFileRegex(path, f.videoExcludeRegex)) { + return false + } else if (isImageFile || isZipFile) && (s.ExcludeImage || matchFileRegex(path, f.imageExcludeRegex)) { + return false + } + + return true +} + +type scanConfig struct { + isGenerateThumbnails bool +} + +func (c *scanConfig) GetCreateGalleriesFromFolders() bool { + return
instance.Config.GetCreateGalleriesFromFolders() +} + +func (c *scanConfig) IsGenerateThumbnails() bool { + return c.isGenerateThumbnails +} + +func getScanHandlers(options ScanMetadataInput, taskQueue *job.TaskQueue, progress *job.Progress) []file.Handler { + db := instance.Database + pluginCache := instance.PluginCache + + return []file.Handler{ + &file.FilteredHandler{ + Filter: file.FilterFunc(imageFileFilter), + Handler: &image.ScanHandler{ + CreatorUpdater: db.Image, + GalleryFinder: db.Gallery, + ThumbnailGenerator: &imageThumbnailGenerator{}, + ScanConfig: &scanConfig{ + isGenerateThumbnails: options.ScanGenerateThumbnails, + }, + PluginCache: pluginCache, + }, + }, + &file.FilteredHandler{ + Filter: file.FilterFunc(galleryFileFilter), + Handler: &gallery.ScanHandler{ + CreatorUpdater: db.Gallery, + SceneFinderUpdater: db.Scene, + PluginCache: pluginCache, + }, + }, + &file.FilteredHandler{ + Filter: file.FilterFunc(videoFileFilter), + Handler: &scene.ScanHandler{ + CreatorUpdater: db.Scene, + PluginCache: pluginCache, + CoverGenerator: &coverGenerator{}, + ScanGenerator: &sceneGenerators{ + input: options, + taskQueue: taskQueue, + progress: progress, + }, + }, + }, + } +} + +type imageThumbnailGenerator struct{} + +func (g *imageThumbnailGenerator) GenerateThumbnail(ctx context.Context, i *models.Image, f *file.ImageFile) error { + thumbPath := GetInstance().Paths.Generated.GetThumbnailPath(i.Checksum, models.DefaultGthumbWidth) + exists, _ := fsutil.FileExists(thumbPath) + if exists { + return nil + } + + if f.Height <= models.DefaultGthumbWidth && f.Width <= models.DefaultGthumbWidth { + return nil + } + + logger.Debugf("Generating thumbnail for %s", f.Path) + + encoder := image.NewThumbnailEncoder(instance.FFMPEG) + data, err := encoder.GetThumbnail(f, models.DefaultGthumbWidth) + + if err != nil { + // don't log for animated images + if !errors.Is(err, image.ErrNotSupportedForThumbnail) { + return fmt.Errorf("getting thumbnail for image %s: %w", f.Path, err) + } + return nil + } + + err = fsutil.WriteFile(thumbPath, data) + if err != nil { + return fmt.Errorf("writing thumbnail for image %s: %w", f.Path, err) + } + + return nil +} + +type sceneGenerators struct { + input ScanMetadataInput + taskQueue *job.TaskQueue + progress *job.Progress +} + +func (g *sceneGenerators) Generate(ctx context.Context, s *models.Scene, f *file.VideoFile) error { + const overwrite = false + + progress := g.progress + t := g.input + path := f.Path + config := instance.Config + fileNamingAlgorithm := config.GetVideoFileNamingAlgorithm() + + if t.ScanGenerateSprites { + progress.AddTotal(1) + g.taskQueue.Add(fmt.Sprintf("Generating sprites for %s", path), func(ctx context.Context) { taskSprite := GenerateSpriteTask{ Scene: *s, - Overwrite: false, - fileNamingAlgorithm: t.fileNamingAlgorithm, + Overwrite: overwrite, + fileNamingAlgorithm: fileNamingAlgorithm, } taskSprite.Start(ctx) - iwg.Done() + progress.Increment() }) } - if t.GeneratePhash { - iwg.Add() - - go t.progress.ExecuteTask(fmt.Sprintf("Generating phash for %s", path), func() { + if t.ScanGeneratePhashes { + progress.AddTotal(1) + g.taskQueue.Add(fmt.Sprintf("Generating phash for %s", path), func(ctx context.Context) { taskPhash := GeneratePhashTask{ - Scene: *s, - fileNamingAlgorithm: t.fileNamingAlgorithm, - txnManager: t.TxnManager, + File: f, + fileNamingAlgorithm: fileNamingAlgorithm, + txnManager: instance.Database, + fileUpdater: instance.Database.File, + Overwrite: overwrite, } taskPhash.Start(ctx) - iwg.Done() + 
progress.Increment() }) } - if t.GeneratePreview { - iwg.Add() - - go t.progress.ExecuteTask(fmt.Sprintf("Generating preview for %s", path), func() { - options := getGeneratePreviewOptions(models.GeneratePreviewOptionsInput{}) - const overwrite = false + if t.ScanGeneratePreviews { + progress.AddTotal(1) + g.taskQueue.Add(fmt.Sprintf("Generating preview for %s", path), func(ctx context.Context) { + options := getGeneratePreviewOptions(GeneratePreviewOptionsInput{}) g := &generate.Generator{ Encoder: instance.FFMPEG, @@ -335,73 +366,16 @@ func (t *ScanTask) Start(ctx context.Context) { taskPreview := GeneratePreviewTask{ Scene: *s, - ImagePreview: t.GenerateImagePreview, + ImagePreview: t.ScanGenerateImagePreviews, Options: options, Overwrite: overwrite, - fileNamingAlgorithm: t.fileNamingAlgorithm, + fileNamingAlgorithm: fileNamingAlgorithm, generator: g, } taskPreview.Start(ctx) - iwg.Done() + progress.Increment() }) } - iwg.Wait() -} - -func walkFilesToScan(s *models.StashConfig, f filepath.WalkFunc) error { - config := config.GetInstance() - vidExt := config.GetVideoExtensions() - imgExt := config.GetImageExtensions() - gExt := config.GetGalleryExtensions() - capExt := scene.CaptionExts - excludeVidRegex := generateRegexps(config.GetExcludes()) - excludeImgRegex := generateRegexps(config.GetImageExcludes()) - - // don't scan zip images directly - if file.IsZipPath(s.Path) { - logger.Warnf("Cannot rescan zip image %s. Rescan zip gallery instead.", s.Path) - return nil - } - - generatedPath := config.GetGeneratedPath() - - return fsutil.SymWalk(s.Path, func(path string, info os.FileInfo, err error) error { - if err != nil { - logger.Warnf("error scanning %s: %s", path, err.Error()) - return nil - } - - if info.IsDir() { - // #1102 - ignore files in generated path - if fsutil.IsPathInDir(generatedPath, path) { - return filepath.SkipDir - } - - // shortcut: skip the directory entirely if it matches both exclusion patterns - // add a trailing separator so that it correctly matches against patterns like path/.* - pathExcludeTest := path + string(filepath.Separator) - if (s.ExcludeVideo || matchFileRegex(pathExcludeTest, excludeVidRegex)) && (s.ExcludeImage || matchFileRegex(pathExcludeTest, excludeImgRegex)) { - return filepath.SkipDir - } - - return nil - } - - if !s.ExcludeVideo && fsutil.MatchExtension(path, vidExt) && !matchFileRegex(path, excludeVidRegex) { - return f(path, info, err) - } - - if !s.ExcludeImage { - if (fsutil.MatchExtension(path, imgExt) || fsutil.MatchExtension(path, gExt)) && !matchFileRegex(path, excludeImgRegex) { - return f(path, info, err) - } - } - - if fsutil.MatchExtension(path, capExt) { - return f(path, info, err) - } - - return nil - }) + return nil } diff --git a/internal/manager/task_scan_gallery.go b/internal/manager/task_scan_gallery.go deleted file mode 100644 index 8c3f5c550..000000000 --- a/internal/manager/task_scan_gallery.go +++ /dev/null @@ -1,169 +0,0 @@ -package manager - -import ( - "archive/zip" - "context" - "fmt" - "path/filepath" - "strings" - - "github.com/remeh/sizedwaitgroup" - "github.com/stashapp/stash/internal/manager/config" - "github.com/stashapp/stash/pkg/file" - "github.com/stashapp/stash/pkg/gallery" - "github.com/stashapp/stash/pkg/logger" - "github.com/stashapp/stash/pkg/models" -) - -func (t *ScanTask) scanGallery(ctx context.Context) { - var g *models.Gallery - path := t.file.Path() - images := 0 - scanImages := false - - if err := t.TxnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - var err error - g, err = 
r.Gallery().FindByPath(path) - - if g != nil && err == nil { - images, err = r.Image().CountByGalleryID(g.ID) - if err != nil { - return fmt.Errorf("error getting images for zip gallery %s: %s", path, err.Error()) - } - } - - return err - }); err != nil { - logger.Error(err.Error()) - return - } - - scanner := gallery.Scanner{ - Scanner: gallery.FileScanner(&file.FSHasher{}), - ImageExtensions: instance.Config.GetImageExtensions(), - StripFileExtension: t.StripFileExtension, - CaseSensitiveFs: t.CaseSensitiveFs, - TxnManager: t.TxnManager, - Paths: instance.Paths, - PluginCache: instance.PluginCache, - MutexManager: t.mutexManager, - } - - var err error - if g != nil { - g, scanImages, err = scanner.ScanExisting(ctx, g, t.file) - if err != nil { - logger.Error(err.Error()) - return - } - - // scan the zip files if the gallery has no images - scanImages = scanImages || images == 0 - } else { - g, scanImages, err = scanner.ScanNew(ctx, t.file) - if err != nil { - logger.Error(err.Error()) - } - } - - if g != nil { - if scanImages { - t.scanZipImages(ctx, g) - } else { - // in case thumbnails have been deleted, regenerate them - t.regenerateZipImages(ctx, g) - } - } -} - -// associates a gallery to a scene with the same basename -func (t *ScanTask) associateGallery(ctx context.Context, wg *sizedwaitgroup.SizedWaitGroup) { - path := t.file.Path() - if err := t.TxnManager.WithTxn(ctx, func(r models.Repository) error { - qb := r.Gallery() - sqb := r.Scene() - g, err := qb.FindByPath(path) - if err != nil { - return err - } - - if g == nil { - // associate is run after scan is finished - // should only happen if gallery is a directory or an io error occurs during hashing - logger.Warnf("associate: gallery %s not found in DB", path) - return nil - } - - basename := strings.TrimSuffix(path, filepath.Ext(path)) - var relatedFiles []string - vExt := config.GetInstance().GetVideoExtensions() - // make a list of media files that can be related to the gallery - for _, ext := range vExt { - related := basename + "." 
+ ext - // exclude gallery extensions from the related files - if !isGallery(related) { - relatedFiles = append(relatedFiles, related) - } - } - for _, scenePath := range relatedFiles { - scene, _ := sqb.FindByPath(scenePath) - // found related Scene - if scene != nil { - sceneGalleries, _ := sqb.FindByGalleryID(g.ID) // check if gallery is already associated to the scene - isAssoc := false - for _, sg := range sceneGalleries { - if scene.ID == sg.ID { - isAssoc = true - break - } - } - if !isAssoc { - logger.Infof("associate: Gallery %s is related to scene: %d", path, scene.ID) - if err := sqb.UpdateGalleries(scene.ID, []int{g.ID}); err != nil { - return err - } - } - } - } - return nil - }); err != nil { - logger.Error(err.Error()) - } - wg.Done() -} - -func (t *ScanTask) scanZipImages(ctx context.Context, zipGallery *models.Gallery) { - err := walkGalleryZip(zipGallery.Path.String, func(f *zip.File) error { - // copy this task and change the filename - subTask := *t - - // filepath is the zip file and the internal file name, separated by a null byte - subTask.file = file.ZipFile(zipGallery.Path.String, f) - subTask.zipGallery = zipGallery - - // run the subtask and wait for it to complete - subTask.Start(ctx) - return nil - }) - if err != nil { - logger.Warnf("failed to scan zip file images for %s: %s", zipGallery.Path.String, err.Error()) - } -} - -func (t *ScanTask) regenerateZipImages(ctx context.Context, zipGallery *models.Gallery) { - var images []*models.Image - if err := t.TxnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - iqb := r.Image() - - var err error - images, err = iqb.FindByGalleryID(zipGallery.ID) - return err - }); err != nil { - logger.Warnf("failed to find gallery images: %s", err.Error()) - return - } - - for _, img := range images { - t.generateThumbnail(img) - } -} diff --git a/internal/manager/task_scan_image.go b/internal/manager/task_scan_image.go deleted file mode 100644 index 36aff5a04..000000000 --- a/internal/manager/task_scan_image.go +++ /dev/null @@ -1,176 +0,0 @@ -package manager - -import ( - "context" - "database/sql" - "errors" - "os/exec" - "path/filepath" - "time" - - "github.com/stashapp/stash/internal/manager/config" - "github.com/stashapp/stash/pkg/file" - "github.com/stashapp/stash/pkg/fsutil" - "github.com/stashapp/stash/pkg/gallery" - "github.com/stashapp/stash/pkg/hash/md5" - "github.com/stashapp/stash/pkg/image" - "github.com/stashapp/stash/pkg/logger" - "github.com/stashapp/stash/pkg/models" - "github.com/stashapp/stash/pkg/plugin" -) - -func (t *ScanTask) scanImage(ctx context.Context) { - var i *models.Image - path := t.file.Path() - - if err := t.TxnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - var err error - i, err = r.Image().FindByPath(path) - return err - }); err != nil { - logger.Error(err.Error()) - return - } - - scanner := image.Scanner{ - Scanner: image.FileScanner(&file.FSHasher{}), - StripFileExtension: t.StripFileExtension, - TxnManager: t.TxnManager, - Paths: GetInstance().Paths, - PluginCache: instance.PluginCache, - MutexManager: t.mutexManager, - } - - var err error - if i != nil { - i, err = scanner.ScanExisting(ctx, i, t.file) - if err != nil { - logger.Error(err.Error()) - return - } - } else { - i, err = scanner.ScanNew(ctx, t.file) - if err != nil { - logger.Error(err.Error()) - return - } - - if i != nil { - if t.zipGallery != nil { - // associate with gallery - if err := t.TxnManager.WithTxn(ctx, func(r models.Repository) error { - return gallery.AddImage(r.Gallery(), 
t.zipGallery.ID, i.ID) - }); err != nil { - logger.Error(err.Error()) - return - } - } else if config.GetInstance().GetCreateGalleriesFromFolders() { - // create gallery from folder or associate with existing gallery - logger.Infof("Associating image %s with folder gallery", i.Path) - var galleryID int - var isNewGallery bool - if err := t.TxnManager.WithTxn(ctx, func(r models.Repository) error { - var err error - galleryID, isNewGallery, err = t.associateImageWithFolderGallery(i.ID, r.Gallery()) - return err - }); err != nil { - logger.Error(err.Error()) - return - } - - if isNewGallery { - GetInstance().PluginCache.ExecutePostHooks(ctx, galleryID, plugin.GalleryCreatePost, nil, nil) - } - } - } - } - - if i != nil { - t.generateThumbnail(i) - } -} - -func (t *ScanTask) associateImageWithFolderGallery(imageID int, qb models.GalleryReaderWriter) (galleryID int, isNew bool, err error) { - // find a gallery with the path specified - path := filepath.Dir(t.file.Path()) - var g *models.Gallery - g, err = qb.FindByPath(path) - if err != nil { - return - } - - if g == nil { - checksum := md5.FromString(path) - - // create the gallery - currentTime := time.Now() - - newGallery := models.Gallery{ - Checksum: checksum, - Path: sql.NullString{ - String: path, - Valid: true, - }, - CreatedAt: models.SQLiteTimestamp{Timestamp: currentTime}, - UpdatedAt: models.SQLiteTimestamp{Timestamp: currentTime}, - Title: sql.NullString{ - String: fsutil.GetNameFromPath(path, false), - Valid: true, - }, - } - - logger.Infof("Creating gallery for folder %s", path) - g, err = qb.Create(newGallery) - if err != nil { - return 0, false, err - } - - isNew = true - } - - // associate image with gallery - err = gallery.AddImage(qb, g.ID, imageID) - galleryID = g.ID - return -} - -func (t *ScanTask) generateThumbnail(i *models.Image) { - if !t.GenerateThumbnails { - return - } - - thumbPath := GetInstance().Paths.Generated.GetThumbnailPath(i.Checksum, models.DefaultGthumbWidth) - exists, _ := fsutil.FileExists(thumbPath) - if exists { - return - } - - config, _, err := image.DecodeSourceImage(i) - if err != nil { - logger.Errorf("error reading image %s: %s", i.Path, err.Error()) - return - } - - if config.Height > models.DefaultGthumbWidth || config.Width > models.DefaultGthumbWidth { - encoder := image.NewThumbnailEncoder(instance.FFMPEG) - data, err := encoder.GetThumbnail(i, models.DefaultGthumbWidth) - - if err != nil { - // don't log for animated images - if !errors.Is(err, image.ErrNotSupportedForThumbnail) { - logger.Errorf("error getting thumbnail for image %s: %s", i.Path, err.Error()) - - var exitErr *exec.ExitError - if errors.As(err, &exitErr) { - logger.Errorf("stderr: %s", string(exitErr.Stderr)) - } - } - return - } - - err = fsutil.WriteFile(thumbPath, data) - if err != nil { - logger.Errorf("error writing thumbnail for image %s: %s", i.Path, err) - } - } -} diff --git a/internal/manager/task_scan_scene.go b/internal/manager/task_scan_scene.go deleted file mode 100644 index 218a2e012..000000000 --- a/internal/manager/task_scan_scene.go +++ /dev/null @@ -1,127 +0,0 @@ -package manager - -import ( - "context" - "path/filepath" - - "github.com/stashapp/stash/internal/manager/config" - "github.com/stashapp/stash/pkg/ffmpeg" - "github.com/stashapp/stash/pkg/file" - "github.com/stashapp/stash/pkg/logger" - "github.com/stashapp/stash/pkg/models" - "github.com/stashapp/stash/pkg/scene" - "github.com/stashapp/stash/pkg/scene/generate" -) - -type sceneScreenshotter struct { - g *generate.Generator -} - -func (ss 
*sceneScreenshotter) GenerateScreenshot(ctx context.Context, probeResult *ffmpeg.VideoFile, hash string) error { - return ss.g.Screenshot(ctx, probeResult.Path, hash, probeResult.Width, probeResult.Duration, generate.ScreenshotOptions{}) -} - -func (ss *sceneScreenshotter) GenerateThumbnail(ctx context.Context, probeResult *ffmpeg.VideoFile, hash string) error { - return ss.g.Thumbnail(ctx, probeResult.Path, hash, probeResult.Duration, generate.ScreenshotOptions{}) -} - -func (t *ScanTask) scanScene(ctx context.Context) *models.Scene { - logError := func(err error) *models.Scene { - logger.Error(err.Error()) - return nil - } - - var retScene *models.Scene - var s *models.Scene - - if err := t.TxnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - var err error - s, err = r.Scene().FindByPath(t.file.Path()) - return err - }); err != nil { - logger.Error(err.Error()) - return nil - } - - g := &generate.Generator{ - Encoder: instance.FFMPEG, - LockManager: instance.ReadLockManager, - ScenePaths: instance.Paths.Scene, - } - - scanner := scene.Scanner{ - Scanner: scene.FileScanner(&file.FSHasher{}, t.fileNamingAlgorithm, t.calculateMD5), - StripFileExtension: t.StripFileExtension, - FileNamingAlgorithm: t.fileNamingAlgorithm, - TxnManager: t.TxnManager, - Paths: GetInstance().Paths, - Screenshotter: &sceneScreenshotter{ - g: g, - }, - VideoFileCreator: &instance.FFProbe, - PluginCache: instance.PluginCache, - MutexManager: t.mutexManager, - UseFileMetadata: t.UseFileMetadata, - } - - if s != nil { - if err := scanner.ScanExisting(ctx, s, t.file); err != nil { - return logError(err) - } - - return nil - } - - var err error - retScene, err = scanner.ScanNew(ctx, t.file) - if err != nil { - return logError(err) - } - - return retScene -} - -// associates captions to scene/s with the same basename -func (t *ScanTask) associateCaptions(ctx context.Context) { - vExt := config.GetInstance().GetVideoExtensions() - captionPath := t.file.Path() - captionLang := scene.GetCaptionsLangFromPath(captionPath) - - relatedFiles := scene.GenerateCaptionCandidates(captionPath, vExt) - if err := t.TxnManager.WithTxn(ctx, func(r models.Repository) error { - var err error - sqb := r.Scene() - - for _, scenePath := range relatedFiles { - s, er := sqb.FindByPath(scenePath) - - if er != nil { - logger.Errorf("Error searching for scene %s: %v", scenePath, er) - continue - } - if s != nil { // found related Scene - logger.Debugf("Matched captions to scene %s", s.Path) - captions, er := sqb.GetCaptions(s.ID) - if er == nil { - fileExt := filepath.Ext(captionPath) - ext := fileExt[1:] - if !scene.IsLangInCaptions(captionLang, ext, captions) { // only update captions if language code is not present - newCaption := &models.SceneCaption{ - LanguageCode: captionLang, - Filename: filepath.Base(captionPath), - CaptionType: ext, - } - captions = append(captions, newCaption) - er = sqb.UpdateCaptions(s.ID, captions) - if er == nil { - logger.Debugf("Updated captions for scene %s. 
Added %s", s.Path, captionLang) - } - } - } - } - } - return err - }); err != nil { - logger.Error(err.Error()) - } -} diff --git a/internal/manager/task_stash_box_tag.go b/internal/manager/task_stash_box_tag.go index 2932a3167..cf7add510 100644 --- a/internal/manager/task_stash_box_tag.go +++ b/internal/manager/task_stash_box_tag.go @@ -10,11 +10,11 @@ import ( "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/scraper/stashbox" + "github.com/stashapp/stash/pkg/txn" "github.com/stashapp/stash/pkg/utils" ) type StashBoxPerformerTagTask struct { - txnManager models.TransactionManager box *models.StashBox name *string performer *models.Performer @@ -41,12 +41,17 @@ func (t *StashBoxPerformerTagTask) stashBoxPerformerTag(ctx context.Context) { var performer *models.ScrapedPerformer var err error - client := stashbox.NewClient(*t.box, t.txnManager) + client := stashbox.NewClient(*t.box, instance.Repository, stashbox.Repository{ + Scene: instance.Repository.Scene, + Performer: instance.Repository.Performer, + Tag: instance.Repository.Tag, + Studio: instance.Repository.Studio, + }) if t.refresh { var performerID string - txnErr := t.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - stashids, _ := r.Performer().GetStashIDs(t.performer.ID) + txnErr := txn.WithTxn(ctx, instance.Repository, func(ctx context.Context) error { + stashids, _ := instance.Repository.Performer.GetStashIDs(ctx, t.performer.ID) for _, id := range stashids { if id.Endpoint == t.box.Endpoint { performerID = id.StashID @@ -156,11 +161,12 @@ func (t *StashBoxPerformerTagTask) stashBoxPerformerTag(ctx context.Context) { partial.URL = &value } - txnErr := t.txnManager.WithTxn(ctx, func(r models.Repository) error { - _, err := r.Performer().Update(partial) + txnErr := txn.WithTxn(ctx, instance.Repository, func(ctx context.Context) error { + r := instance.Repository + _, err := r.Performer.Update(ctx, partial) if !t.refresh { - err = r.Performer().UpdateStashIDs(t.performer.ID, []models.StashID{ + err = r.Performer.UpdateStashIDs(ctx, t.performer.ID, []models.StashID{ { Endpoint: t.box.Endpoint, StashID: *performer.RemoteSiteID, @@ -176,7 +182,7 @@ func (t *StashBoxPerformerTagTask) stashBoxPerformerTag(ctx context.Context) { if err != nil { return err } - err = r.Performer().UpdateImage(t.performer.ID, image) + err = r.Performer.UpdateImage(ctx, t.performer.ID, image) if err != nil { return err } @@ -218,13 +224,14 @@ func (t *StashBoxPerformerTagTask) stashBoxPerformerTag(ctx context.Context) { URL: getNullString(performer.URL), UpdatedAt: models.SQLiteTimestamp{Timestamp: currentTime}, } - err := t.txnManager.WithTxn(ctx, func(r models.Repository) error { - createdPerformer, err := r.Performer().Create(newPerformer) + err := txn.WithTxn(ctx, instance.Repository, func(ctx context.Context) error { + r := instance.Repository + createdPerformer, err := r.Performer.Create(ctx, newPerformer) if err != nil { return err } - err = r.Performer().UpdateStashIDs(createdPerformer.ID, []models.StashID{ + err = r.Performer.UpdateStashIDs(ctx, createdPerformer.ID, []models.StashID{ { Endpoint: t.box.Endpoint, StashID: *performer.RemoteSiteID, @@ -239,7 +246,7 @@ func (t *StashBoxPerformerTagTask) stashBoxPerformerTag(ctx context.Context) { if imageErr != nil { return imageErr } - err = r.Performer().UpdateImage(createdPerformer.ID, image) + err = r.Performer.UpdateImage(ctx, createdPerformer.ID, image) } return err }) diff --git a/internal/manager/task_transcode.go 
b/internal/manager/task_transcode.go index a3d24dcde..296042bdd 100644 --- a/internal/manager/task_transcode.go +++ b/internal/manager/task_transcode.go @@ -32,20 +32,27 @@ func (t *GenerateTranscodeTask) Start(ctc context.Context) { return } + f := t.Scene.Files.Primary() + ffprobe := instance.FFProbe var container ffmpeg.Container var err error - container, err = GetSceneFileContainer(&t.Scene) + container, err = GetVideoFileContainer(f) if err != nil { logger.Errorf("[transcode] error getting scene container: %s", err.Error()) return } - videoCodec := t.Scene.VideoCodec.String + var videoCodec string + + if f.VideoCodec != "" { + videoCodec = f.VideoCodec + } + audioCodec := ffmpeg.MissingUnsupported - if t.Scene.AudioCodec.Valid { - audioCodec = ffmpeg.ProbeAudioCodec(t.Scene.AudioCodec.String) + if f.AudioCodec != "" { + audioCodec = ffmpeg.ProbeAudioCodec(f.AudioCodec) } if !t.Force && ffmpeg.IsStreamable(videoCodec, audioCodec, container) == nil { @@ -54,7 +61,7 @@ func (t *GenerateTranscodeTask) Start(ctc context.Context) { // TODO - move transcode generation logic elsewhere - videoFile, err := ffprobe.NewVideoFile(t.Scene.Path) + videoFile, err := ffprobe.NewVideoFile(f.Path) if err != nil { logger.Errorf("[transcode] error reading video file: %s", err.Error()) return @@ -95,6 +102,11 @@ func (t *GenerateTranscodeTask) Start(ctc context.Context) { // used only when counting files to generate, doesn't affect the actual transcode generation // if container is missing from DB it is treated as non supported in order not to delay the user func (t *GenerateTranscodeTask) isTranscodeNeeded() bool { + f := t.Scene.Files.Primary() + if f == nil { + return false + } + hasTranscode := HasTranscode(&t.Scene, t.fileNamingAlgorithm) if !t.Overwrite && hasTranscode { return false @@ -104,15 +116,18 @@ func (t *GenerateTranscodeTask) isTranscodeNeeded() bool { return true } - videoCodec := t.Scene.VideoCodec.String + var videoCodec string + if f.VideoCodec != "" { + videoCodec = f.VideoCodec + } container := "" audioCodec := ffmpeg.MissingUnsupported - if t.Scene.AudioCodec.Valid { - audioCodec = ffmpeg.ProbeAudioCodec(t.Scene.AudioCodec.String) + if f.AudioCodec != "" { + audioCodec = ffmpeg.ProbeAudioCodec(f.AudioCodec) } - if t.Scene.Format.Valid { - container = t.Scene.Format.String + if f.Format != "" { + container = f.Format } if ffmpeg.IsStreamable(videoCodec, audioCodec, ffmpeg.Container(container)) == nil { diff --git a/pkg/database/custom_migrations.go b/pkg/database/custom_migrations.go deleted file mode 100644 index 340ffba55..000000000 --- a/pkg/database/custom_migrations.go +++ /dev/null @@ -1,73 +0,0 @@ -package database - -import ( - "database/sql" - "errors" - "fmt" - "strings" - - "github.com/jmoiron/sqlx" - "github.com/stashapp/stash/pkg/logger" -) - -func runCustomMigrations() error { - if err := createImagesChecksumIndex(); err != nil { - return err - } - - return nil -} - -func createImagesChecksumIndex() error { - return WithTxn(func(tx *sqlx.Tx) error { - row := tx.QueryRow("SELECT 1 AS found FROM sqlite_master WHERE type = 'index' AND name = 'images_checksum_unique'") - err := row.Err() - if err != nil && !errors.Is(err, sql.ErrNoRows) { - return err - } - - if err == nil { - var found bool - if err := row.Scan(&found); err != nil && err != sql.ErrNoRows { - return fmt.Errorf("error while scanning for index: %w", err) - } - if found { - return nil - } - } - - _, err = tx.Exec("CREATE UNIQUE INDEX images_checksum_unique ON images (checksum)") - if err == nil { - _, err = 
tx.Exec("DROP INDEX IF EXISTS index_images_checksum") - if err != nil { - logger.Errorf("Failed to remove surrogate images.checksum index: %s", err) - } - logger.Info("Created unique constraint on images table") - return nil - } - - _, err = tx.Exec("CREATE INDEX IF NOT EXISTS index_images_checksum ON images (checksum)") - if err != nil { - logger.Errorf("Unable to create index on images.checksum: %s", err) - } - - var result []struct { - Checksum string `db:"checksum"` - } - - err = tx.Select(&result, "SELECT checksum FROM images GROUP BY checksum HAVING COUNT(1) > 1") - if err != nil && !errors.Is(err, sql.ErrNoRows) { - logger.Errorf("Unable to determine non-unique image checksums: %s", err) - return nil - } - - checksums := make([]string, len(result)) - for i, res := range result { - checksums[i] = res.Checksum - } - - logger.Warnf("The following duplicate image checksums have been found. Please remove the duplicates and restart. %s", strings.Join(checksums, ", ")) - - return nil - }) -} diff --git a/pkg/database/database.go b/pkg/database/database.go deleted file mode 100644 index 3fa260716..000000000 --- a/pkg/database/database.go +++ /dev/null @@ -1,308 +0,0 @@ -package database - -import ( - "database/sql" - "embed" - "errors" - "fmt" - "os" - "sync" - "time" - - "github.com/fvbommel/sortorder" - "github.com/golang-migrate/migrate/v4" - sqlite3mig "github.com/golang-migrate/migrate/v4/database/sqlite3" - "github.com/golang-migrate/migrate/v4/source/iofs" - "github.com/jmoiron/sqlx" - sqlite3 "github.com/mattn/go-sqlite3" - - "github.com/stashapp/stash/pkg/fsutil" - "github.com/stashapp/stash/pkg/logger" -) - -var DB *sqlx.DB -var WriteMu sync.Mutex -var dbPath string -var appSchemaVersion uint = 31 -var databaseSchemaVersion uint - -//go:embed migrations/*.sql -var migrationsBox embed.FS - -var ( - // ErrMigrationNeeded indicates that a database migration is needed - // before the database can be initialized - ErrMigrationNeeded = errors.New("database migration required") - - // ErrDatabaseNotInitialized indicates that the database is not - // initialized, usually due to an incomplete configuration. - ErrDatabaseNotInitialized = errors.New("database not initialized") -) - -const sqlite3Driver = "sqlite3ex" - -// Ready returns an error if the database is not ready to begin transactions. -func Ready() error { - if DB == nil { - return ErrDatabaseNotInitialized - } - - return nil -} - -func init() { - // register custom driver with regexp function - registerCustomDriver() -} - -// Initialize initializes the database. If the database is new, then it -// performs a full migration to the latest schema version. Otherwise, any -// necessary migrations must be run separately using RunMigrations. -// Returns true if the database is new. -func Initialize(databasePath string) error { - dbPath = databasePath - - if err := getDatabaseSchemaVersion(); err != nil { - return fmt.Errorf("error getting database schema version: %v", err) - } - - if databaseSchemaVersion == 0 { - // new database, just run the migrations - if err := RunMigrations(); err != nil { - return fmt.Errorf("error running initial schema migrations: %v", err) - } - // RunMigrations calls Initialise. 
Just return - return nil - } else { - if databaseSchemaVersion > appSchemaVersion { - panic(fmt.Sprintf("Database schema version %d is incompatible with required schema version %d", databaseSchemaVersion, appSchemaVersion)) - } - - // if migration is needed, then don't open the connection - if NeedsMigration() { - logger.Warnf("Database schema version %d does not match required schema version %d.", databaseSchemaVersion, appSchemaVersion) - return nil - } - } - - const disableForeignKeys = false - DB = open(databasePath, disableForeignKeys) - - if err := runCustomMigrations(); err != nil { - return err - } - - return nil -} - -func Close() error { - WriteMu.Lock() - defer WriteMu.Unlock() - - if DB != nil { - if err := DB.Close(); err != nil { - return err - } - - DB = nil - } - - return nil -} - -func open(databasePath string, disableForeignKeys bool) *sqlx.DB { - // https://github.com/mattn/go-sqlite3 - url := "file:" + databasePath + "?_journal=WAL&_sync=NORMAL" - if !disableForeignKeys { - url += "&_fk=true" - } - - conn, err := sqlx.Open(sqlite3Driver, url) - conn.SetMaxOpenConns(25) - conn.SetMaxIdleConns(4) - conn.SetConnMaxLifetime(30 * time.Second) - if err != nil { - logger.Fatalf("db.Open(): %q\n", err) - } - - return conn -} - -func Reset(databasePath string) error { - err := DB.Close() - - if err != nil { - return errors.New("Error closing database: " + err.Error()) - } - - err = os.Remove(databasePath) - if err != nil { - return errors.New("Error removing database: " + err.Error()) - } - - // remove the -shm, -wal files ( if they exist ) - walFiles := []string{databasePath + "-shm", databasePath + "-wal"} - for _, wf := range walFiles { - if exists, _ := fsutil.FileExists(wf); exists { - err = os.Remove(wf) - if err != nil { - return errors.New("Error removing database: " + err.Error()) - } - } - } - - if err := Initialize(databasePath); err != nil { - return fmt.Errorf("[reset DB] unable to initialize: %w", err) - } - - return nil -} - -// Backup the database. If db is nil, then uses the existing database -// connection. 
-func Backup(db *sqlx.DB, backupPath string) error { - if db == nil { - var err error - db, err = sqlx.Connect(sqlite3Driver, "file:"+dbPath+"?_fk=true") - if err != nil { - return fmt.Errorf("open database %s failed: %v", dbPath, err) - } - defer db.Close() - } - - logger.Infof("Backing up database into: %s", backupPath) - _, err := db.Exec(`VACUUM INTO "` + backupPath + `"`) - if err != nil { - return fmt.Errorf("vacuum failed: %v", err) - } - - return nil -} - -func RestoreFromBackup(backupPath string) error { - logger.Infof("Restoring backup database %s into %s", backupPath, dbPath) - return os.Rename(backupPath, dbPath) -} - -// Migrate the database -func NeedsMigration() bool { - return databaseSchemaVersion != appSchemaVersion -} - -func AppSchemaVersion() uint { - return appSchemaVersion -} - -func DatabasePath() string { - return dbPath -} - -func DatabaseBackupPath() string { - return fmt.Sprintf("%s.%d.%s", dbPath, databaseSchemaVersion, time.Now().Format("20060102_150405")) -} - -func Version() uint { - return databaseSchemaVersion -} - -func getMigrate() (*migrate.Migrate, error) { - migrations, err := iofs.New(migrationsBox, "migrations") - if err != nil { - panic(err.Error()) - } - - const disableForeignKeys = true - conn := open(dbPath, disableForeignKeys) - - driver, err := sqlite3mig.WithInstance(conn.DB, &sqlite3mig.Config{}) - if err != nil { - return nil, err - } - - // use sqlite3Driver so that migration has access to durationToTinyInt - return migrate.NewWithInstance( - "iofs", - migrations, - dbPath, - driver, - ) -} - -func getDatabaseSchemaVersion() error { - m, err := getMigrate() - if err != nil { - return err - } - - databaseSchemaVersion, _, _ = m.Version() - m.Close() - return nil -} - -// Migrate the database -func RunMigrations() error { - m, err := getMigrate() - if err != nil { - panic(err.Error()) - } - defer m.Close() - - databaseSchemaVersion, _, _ = m.Version() - stepNumber := appSchemaVersion - databaseSchemaVersion - if stepNumber != 0 { - logger.Infof("Migrating database from version %d to %d", databaseSchemaVersion, appSchemaVersion) - err = m.Steps(int(stepNumber)) - if err != nil { - // migration failed - return err - } - } - - // re-initialise the database - if err = Initialize(dbPath); err != nil { - logger.Warnf("Error re-initializing the database: %v", err) - } - - // run a vacuum on the database - logger.Info("Performing vacuum on database") - _, err = DB.Exec("VACUUM") - if err != nil { - logger.Warnf("error while performing post-migration vacuum: %v", err) - } - - return nil -} - -func registerCustomDriver() { - sql.Register(sqlite3Driver, - &sqlite3.SQLiteDriver{ - ConnectHook: func(conn *sqlite3.SQLiteConn) error { - funcs := map[string]interface{}{ - "regexp": regexFn, - "durationToTinyInt": durationToTinyIntFn, - } - - for name, fn := range funcs { - if err := conn.RegisterFunc(name, fn, true); err != nil { - return fmt.Errorf("error registering function %s: %s", name, err.Error()) - } - } - - // COLLATE NATURAL_CS - Case sensitive natural sort - err := conn.RegisterCollation("NATURAL_CS", func(s string, s2 string) int { - if sortorder.NaturalLess(s, s2) { - return -1 - } else { - return 1 - } - }) - - if err != nil { - return fmt.Errorf("error registering natural sort collation: %v", err) - } - - return nil - }, - }, - ) -} diff --git a/pkg/database/transaction.go b/pkg/database/transaction.go deleted file mode 100644 index d8c23fb3b..000000000 --- a/pkg/database/transaction.go +++ /dev/null @@ -1,40 +0,0 @@ -package database - 
-import (
-	"context"
-
-	"github.com/jmoiron/sqlx"
-	"github.com/stashapp/stash/pkg/logger"
-)
-
-// WithTxn executes the provided function within a transaction. It rolls back
-// the transaction if the function returns an error, otherwise the transaction
-// is committed.
-func WithTxn(fn func(tx *sqlx.Tx) error) error {
-	ctx := context.TODO()
-	tx := DB.MustBeginTx(ctx, nil)
-
-	var err error
-	defer func() {
-		if p := recover(); p != nil {
-			// a panic occurred, rollback and repanic
-			if err := tx.Rollback(); err != nil {
-				logger.Warnf("failure when performing transaction rollback: %v", err)
-			}
-			panic(p)
-		}
-
-		if err != nil {
-			// something went wrong, rollback
-			if err := tx.Rollback(); err != nil {
-				logger.Warnf("failure when performing transaction rollback: %v", err)
-			}
-		} else {
-			// all good, commit
-			err = tx.Commit()
-		}
-	}()
-
-	err = fn(tx)
-	return err
-}
diff --git a/pkg/ffmpeg/ffprobe.go b/pkg/ffmpeg/ffprobe.go
index 6561da8d5..fc946b6c1 100644
--- a/pkg/ffmpeg/ffprobe.go
+++ b/pkg/ffmpeg/ffprobe.go
@@ -167,6 +167,9 @@ func parse(filePath string, probeJSON *FFProbeJSON) (*VideoFile, error) {
 	} else {
 		framerate, _ = strconv.ParseFloat(videoStream.AvgFrameRate, 64)
 	}
+	if math.IsNaN(framerate) {
+		framerate = 0
+	}
 	result.FrameRate = math.Round(framerate*100) / 100
 	if rotate, err := strconv.ParseInt(videoStream.Tags.Rotate, 10, 64); err == nil && rotate != 180 {
 		result.Width = videoStream.Height
diff --git a/pkg/file/clean.go b/pkg/file/clean.go
new file mode 100644
index 000000000..05546fec3
--- /dev/null
+++ b/pkg/file/clean.go
@@ -0,0 +1,411 @@
+package file
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io/fs"
+
+	"github.com/stashapp/stash/pkg/job"
+	"github.com/stashapp/stash/pkg/logger"
+	"github.com/stashapp/stash/pkg/txn"
+)
+
+// Cleaner scans through stored file and folder instances and removes those that are no longer present on disk.
+type Cleaner struct {
+	FS         FS
+	Repository Repository
+
+	Handlers []CleanHandler
+}
+
+type cleanJob struct {
+	*Cleaner
+
+	progress *job.Progress
+	options  CleanOptions
+}
+
+// CleanOptions provides options for cleaning files.
+type CleanOptions struct {
+	Paths []string
+
+	// Do a dry run. Don't delete any files
+	DryRun bool
+
+	// PathFilter is used to determine if a file should be included.
+	// Excluded files are marked for cleaning.
+	PathFilter PathFilter
+}
+
+// Clean starts the clean process.
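+// It counts the stored files and folders under the given paths, assesses each
+// against the file system and the PathFilter, and then deletes those that are
+// missing or rejected. When DryRun is set, candidates are assessed but nothing
+// is deleted.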
+func (s *Cleaner) Clean(ctx context.Context, options CleanOptions, progress *job.Progress) { + j := &cleanJob{ + Cleaner: s, + progress: progress, + options: options, + } + + if err := j.execute(ctx); err != nil { + logger.Errorf("error cleaning files: %v", err) + return + } +} + +type fileOrFolder struct { + fileID ID + folderID FolderID +} + +type deleteSet struct { + orderedList []fileOrFolder + fileIDSet map[ID]string + + folderIDSet map[FolderID]string +} + +func newDeleteSet() deleteSet { + return deleteSet{ + fileIDSet: make(map[ID]string), + folderIDSet: make(map[FolderID]string), + } +} + +func (s *deleteSet) add(id ID, path string) { + if _, ok := s.fileIDSet[id]; !ok { + s.orderedList = append(s.orderedList, fileOrFolder{fileID: id}) + s.fileIDSet[id] = path + } +} + +func (s *deleteSet) has(id ID) bool { + _, ok := s.fileIDSet[id] + return ok +} + +func (s *deleteSet) addFolder(id FolderID, path string) { + if _, ok := s.folderIDSet[id]; !ok { + s.orderedList = append(s.orderedList, fileOrFolder{folderID: id}) + s.folderIDSet[id] = path + } +} + +func (s *deleteSet) hasFolder(id FolderID) bool { + _, ok := s.folderIDSet[id] + return ok +} + +func (s *deleteSet) len() int { + return len(s.orderedList) +} + +func (j *cleanJob) execute(ctx context.Context) error { + progress := j.progress + + toDelete := newDeleteSet() + + var ( + fileCount int + folderCount int + ) + + if err := txn.WithTxn(ctx, j.Repository, func(ctx context.Context) error { + var err error + fileCount, err = j.Repository.CountAllInPaths(ctx, j.options.Paths) + if err != nil { + return err + } + + folderCount, err = j.Repository.FolderStore.CountAllInPaths(ctx, j.options.Paths) + if err != nil { + return err + } + + return nil + }); err != nil { + return err + } + + progress.AddTotal(fileCount + folderCount) + progress.Definite() + + if err := j.assessFiles(ctx, &toDelete); err != nil { + return err + } + + if err := j.assessFolders(ctx, &toDelete); err != nil { + return err + } + + if j.options.DryRun && toDelete.len() > 0 { + // add progress for files that would've been deleted + progress.AddProcessed(toDelete.len()) + return nil + } + + progress.ExecuteTask(fmt.Sprintf("Cleaning %d files and folders", toDelete.len()), func() { + for _, ff := range toDelete.orderedList { + if job.IsCancelled(ctx) { + return + } + + if ff.fileID != 0 { + j.deleteFile(ctx, ff.fileID, toDelete.fileIDSet[ff.fileID]) + } + if ff.folderID != 0 { + j.deleteFolder(ctx, ff.folderID, toDelete.folderIDSet[ff.folderID]) + } + + progress.Increment() + } + }) + + return nil +} + +func (j *cleanJob) assessFiles(ctx context.Context, toDelete *deleteSet) error { + const batchSize = 1000 + offset := 0 + progress := j.progress + + more := true + if err := txn.WithTxn(ctx, j.Repository, func(ctx context.Context) error { + for more { + if job.IsCancelled(ctx) { + return nil + } + + files, err := j.Repository.FindAllInPaths(ctx, j.options.Paths, batchSize, offset) + if err != nil { + return fmt.Errorf("error querying for files: %w", err) + } + + for _, f := range files { + path := f.Base().Path + err = nil + fileID := f.Base().ID + + // short-cut, don't assess if already added + if toDelete.has(fileID) { + continue + } + + progress.ExecuteTask(fmt.Sprintf("Assessing file %s for clean", path), func() { + if j.shouldClean(ctx, f) { + err = j.flagFileForDelete(ctx, toDelete, f) + } else { + // increment progress, no further processing + progress.Increment() + } + }) + if err != nil { + return err + } + } + + if len(files) != batchSize { + more = 
false
+			} else {
+				offset += batchSize
+			}
+		}
+
+		return nil
+	}); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// flagFileForDelete marks the file for deletion, adding any contained files and folders to the toDelete set first
+func (j *cleanJob) flagFileForDelete(ctx context.Context, toDelete *deleteSet, f File) error {
+	// add contained files first
+	containedFiles, err := j.Repository.FindByZipFileID(ctx, f.Base().ID)
+	if err != nil {
+		return fmt.Errorf("error finding contained files for %q: %w", f.Base().Path, err)
+	}
+
+	for _, cf := range containedFiles {
+		logger.Infof("Marking contained file %q to clean", cf.Base().Path)
+		toDelete.add(cf.Base().ID, cf.Base().Path)
+	}
+
+	// add contained folders as well
+	containedFolders, err := j.Repository.FolderStore.FindByZipFileID(ctx, f.Base().ID)
+	if err != nil {
+		return fmt.Errorf("error finding contained folders for %q: %w", f.Base().Path, err)
+	}
+
+	for _, cf := range containedFolders {
+		logger.Infof("Marking contained folder %q to clean", cf.Path)
+		toDelete.addFolder(cf.ID, cf.Path)
+	}
+
+	toDelete.add(f.Base().ID, f.Base().Path)
+
+	return nil
+}
+
+func (j *cleanJob) assessFolders(ctx context.Context, toDelete *deleteSet) error {
+	const batchSize = 1000
+	offset := 0
+	progress := j.progress
+
+	more := true
+	if err := txn.WithTxn(ctx, j.Repository, func(ctx context.Context) error {
+		for more {
+			if job.IsCancelled(ctx) {
+				return nil
+			}
+
+			folders, err := j.Repository.FolderStore.FindAllInPaths(ctx, j.options.Paths, batchSize, offset)
+			if err != nil {
+				return fmt.Errorf("error querying for folders: %w", err)
+			}
+
+			for _, f := range folders {
+				path := f.Path
+				folderID := f.ID
+
+				// short-cut, don't assess if already added
+				if toDelete.hasFolder(folderID) {
+					continue
+				}
+
+				err = nil
+				progress.ExecuteTask(fmt.Sprintf("Assessing folder %s for clean", path), func() {
+					if j.shouldCleanFolder(ctx, f) {
+						if err = j.flagFolderForDelete(ctx, toDelete, f); err != nil {
+							return
+						}
+					} else {
+						// increment progress, no further processing
+						progress.Increment()
+					}
+				})
+				if err != nil {
+					return err
+				}
+			}
+
+			if len(folders) != batchSize {
+				more = false
+			} else {
+				offset += batchSize
+			}
+		}
+
+		return nil
+	}); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (j *cleanJob) flagFolderForDelete(ctx context.Context, toDelete *deleteSet, folder *Folder) error {
+	// it is possible that child folders may be included while parent folders are not
+	// so we need to check child folders separately
+	toDelete.addFolder(folder.ID, folder.Path)
+
+	return nil
+}
+
+func (j *cleanJob) shouldClean(ctx context.Context, f File) bool {
+	path := f.Base().Path
+
+	info, err := f.Base().Info(j.FS)
+	if err != nil && !errors.Is(err, fs.ErrNotExist) {
+		logger.Errorf("error getting file info for %q, not cleaning: %v", path, err)
+		return false
+	}
+
+	if info == nil {
+		// info is nil - file not exist
+		logger.Infof("File not found.
Marking to clean: \"%s\"", path) + return true + } + + // run through path filter, if returns false then the file should be cleaned + filter := j.options.PathFilter + + // don't log anything - assume filter will have logged the reason + return !filter.Accept(ctx, path, info) +} + +func (j *cleanJob) shouldCleanFolder(ctx context.Context, f *Folder) bool { + path := f.Path + + info, err := f.Info(j.FS) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + logger.Errorf("error getting folder info for %q, not cleaning: %v", path, err) + return false + } + + if info == nil { + // info is nil - file not exist + logger.Infof("Folder not found. Marking to clean: \"%s\"", path) + return true + } + + // run through path filter, if returns false then the file should be cleaned + filter := j.options.PathFilter + + // don't log anything - assume filter will have logged the reason + return !filter.Accept(ctx, path, info) +} + +func (j *cleanJob) deleteFile(ctx context.Context, fileID ID, fn string) { + // delete associated objects + fileDeleter := NewDeleter() + if err := txn.WithTxn(ctx, j.Repository, func(ctx context.Context) error { + fileDeleter.RegisterHooks(ctx, j.Repository) + + if err := j.fireHandlers(ctx, fileDeleter, fileID); err != nil { + return err + } + + return j.Repository.Destroy(ctx, fileID) + }); err != nil { + logger.Errorf("Error deleting file %q from database: %s", fn, err.Error()) + return + } +} + +func (j *cleanJob) deleteFolder(ctx context.Context, folderID FolderID, fn string) { + // delete associated objects + fileDeleter := NewDeleter() + if err := txn.WithTxn(ctx, j.Repository, func(ctx context.Context) error { + fileDeleter.RegisterHooks(ctx, j.Repository) + + if err := j.fireFolderHandlers(ctx, fileDeleter, folderID); err != nil { + return err + } + + return j.Repository.FolderStore.Destroy(ctx, folderID) + }); err != nil { + logger.Errorf("Error deleting folder %q from database: %s", fn, err.Error()) + return + } +} + +func (j *cleanJob) fireHandlers(ctx context.Context, fileDeleter *Deleter, fileID ID) error { + for _, h := range j.Handlers { + if err := h.HandleFile(ctx, fileDeleter, fileID); err != nil { + return err + } + } + + return nil +} + +func (j *cleanJob) fireFolderHandlers(ctx context.Context, fileDeleter *Deleter, folderID FolderID) error { + for _, h := range j.Handlers { + if err := h.HandleFolder(ctx, fileDeleter, folderID); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/file/delete.go b/pkg/file/delete.go index 7cfd78b19..badbb5096 100644 --- a/pkg/file/delete.go +++ b/pkg/file/delete.go @@ -1,12 +1,14 @@ package file import ( + "context" "errors" "fmt" "io/fs" "os" "github.com/stashapp/stash/pkg/logger" + "github.com/stashapp/stash/pkg/txn" ) const deleteFileSuffix = ".delete" @@ -66,6 +68,19 @@ func NewDeleter() *Deleter { } } +// RegisterHooks registers post-commit and post-rollback hooks. +func (d *Deleter) RegisterHooks(ctx context.Context, mgr txn.Manager) { + mgr.AddPostCommitHook(ctx, func(ctx context.Context) error { + d.Commit() + return nil + }) + + mgr.AddPostRollbackHook(ctx, func(ctx context.Context) error { + d.Rollback() + return nil + }) +} + // Files designates files to be deleted. Each file marked will be renamed to add // a `.delete` suffix. An error is returned if a file could not be renamed. // Note that if an error is returned, then some files may be left renamed. 
@@ -159,3 +174,61 @@ func (d *Deleter) renameForDelete(path string) error { func (d *Deleter) renameForRestore(path string) error { return d.RenamerRemover.Rename(path+deleteFileSuffix, path) } + +func Destroy(ctx context.Context, destroyer Destroyer, f File, fileDeleter *Deleter, deleteFile bool) error { + if err := destroyer.Destroy(ctx, f.Base().ID); err != nil { + return err + } + + // don't delete files in zip files + if deleteFile && f.Base().ZipFileID == nil { + if err := fileDeleter.Files([]string{f.Base().Path}); err != nil { + return err + } + } + + return nil +} + +type ZipDestroyer struct { + FileDestroyer GetterDestroyer + FolderDestroyer FolderGetterDestroyer +} + +func (d *ZipDestroyer) DestroyZip(ctx context.Context, f File, fileDeleter *Deleter, deleteFile bool) error { + // destroy contained files + files, err := d.FileDestroyer.FindByZipFileID(ctx, f.Base().ID) + if err != nil { + return err + } + + for _, ff := range files { + if err := d.FileDestroyer.Destroy(ctx, ff.Base().ID); err != nil { + return err + } + } + + // destroy contained folders + folders, err := d.FolderDestroyer.FindByZipFileID(ctx, f.Base().ID) + if err != nil { + return err + } + + for _, ff := range folders { + if err := d.FolderDestroyer.Destroy(ctx, ff.ID); err != nil { + return err + } + } + + if err := d.FileDestroyer.Destroy(ctx, f.Base().ID); err != nil { + return err + } + + if deleteFile { + if err := fileDeleter.Files([]string{f.Base().Path}); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/file/file.go b/pkg/file/file.go index 397dabd6d..3b83e4946 100644 --- a/pkg/file/file.go +++ b/pkg/file/file.go @@ -1,31 +1,210 @@ package file import ( + "context" "io" "io/fs" - "os" + "net/http" + "strconv" + "time" + + "github.com/stashapp/stash/pkg/logger" ) -type fsFile struct { - path string - info fs.FileInfo +// ID represents an ID of a file. +type ID int32 + +func (i ID) String() string { + return strconv.Itoa(int(i)) } -func (f *fsFile) Open() (io.ReadCloser, error) { - return os.Open(f.path) +// DirEntry represents a file or directory in the file system. +type DirEntry struct { + ZipFileID *ID `json:"zip_file_id"` + + // transient - not persisted + // only guaranteed to have id, path and basename set + ZipFile File + + ModTime time.Time `json:"mod_time"` } -func (f *fsFile) Path() string { - return f.path +func (e *DirEntry) info(fs FS, path string) (fs.FileInfo, error) { + if e.ZipFile != nil { + zipPath := e.ZipFile.Base().Path + zfs, err := fs.OpenZip(zipPath) + if err != nil { + return nil, err + } + defer zfs.Close() + fs = zfs + } + // else assume os file + + ret, err := fs.Lstat(path) + return ret, err } -func (f *fsFile) FileInfo() fs.FileInfo { - return f.info +// File represents a file in the file system. +type File interface { + Base() *BaseFile + SetFingerprints(fp []Fingerprint) + Open(fs FS) (io.ReadCloser, error) } -func FSFile(path string, info fs.FileInfo) SourceFile { - return &fsFile{ - path: path, - info: info, +// BaseFile represents a file in the file system. +type BaseFile struct { + ID ID `json:"id"` + + DirEntry + + // resolved from parent folder and basename only - not stored in DB + Path string `json:"path"` + + Basename string `json:"basename"` + ParentFolderID FolderID `json:"parent_folder_id"` + + Fingerprints Fingerprints `json:"fingerprints"` + + Size int64 `json:"size"` + + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// SetFingerprints sets the fingerprints of the file. 
+// If a fingerprint of the same type already exists, it is overwritten.
+func (f *BaseFile) SetFingerprints(fp []Fingerprint) {
+	for _, v := range fp {
+		f.SetFingerprint(v)
+	}
+}
+
+// SetFingerprint sets the fingerprint of the file.
+// If a fingerprint of the same type already exists, it is overwritten.
+func (f *BaseFile) SetFingerprint(fp Fingerprint) {
+	for i, existing := range f.Fingerprints {
+		if existing.Type == fp.Type {
+			f.Fingerprints[i] = fp
+			return
+		}
+	}
+
+	f.Fingerprints = append(f.Fingerprints, fp)
+}
+
+// Base is used to fulfil the File interface.
+func (f *BaseFile) Base() *BaseFile {
+	return f
+}
+
+func (f *BaseFile) Open(fs FS) (io.ReadCloser, error) {
+	if f.ZipFile != nil {
+		zipPath := f.ZipFile.Base().Path
+		zfs, err := fs.OpenZip(zipPath)
+		if err != nil {
+			return nil, err
+		}
+
+		return zfs.OpenOnly(f.Path)
+	}
+
+	return fs.Open(f.Path)
+}
+
+func (f *BaseFile) Info(fs FS) (fs.FileInfo, error) {
+	return f.info(fs, f.Path)
+}
+
+func (f *BaseFile) Serve(fs FS, w http.ResponseWriter, r *http.Request) {
+	w.Header().Add("Cache-Control", "max-age=604800") // 1 week
+
+	reader, err := f.Open(fs)
+	if err != nil {
+		// assume not found
+		http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
+		return
+	}
+
+	defer reader.Close()
+
+	rsc, ok := reader.(io.ReadSeeker)
+	if !ok {
+		// fallback to direct copy
+		data, err := io.ReadAll(reader)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+
+		if k, err := w.Write(data); err != nil {
+			logger.Warnf("failure while serving image (wrote %v bytes out of %v): %v", k, len(data), err)
+		}
+
+		return
+	}
+
+	http.ServeContent(w, r, f.Basename, f.ModTime, rsc)
+}
+
+type Finder interface {
+	Find(ctx context.Context, id ...ID) ([]File, error)
+}
+
+// Getter provides methods to find Files.
+type Getter interface {
+	FindByPath(ctx context.Context, path string) (File, error)
+	FindByFingerprint(ctx context.Context, fp Fingerprint) ([]File, error)
+	FindByZipFileID(ctx context.Context, zipFileID ID) ([]File, error)
+	FindAllInPaths(ctx context.Context, p []string, limit, offset int) ([]File, error)
+}
+
+type Counter interface {
+	CountAllInPaths(ctx context.Context, p []string) (int, error)
+}
+
+// Creator provides methods to create Files.
+type Creator interface {
+	Create(ctx context.Context, f File) error
+}
+
+// Updater provides methods to update Files.
+type Updater interface {
+	Update(ctx context.Context, f File) error
+}
+
+type Destroyer interface {
+	Destroy(ctx context.Context, id ID) error
+}
+
+type GetterDestroyer interface {
+	Getter
+	Destroyer
+}
+
+// Store provides methods to find, create and update Files.
+type Store interface {
+	Getter
+	Counter
+	Creator
+	Updater
+	Destroyer
+}
+
+// Decorator wraps the Decorate method to add additional functionality while scanning files.
+type Decorator interface {
+	Decorate(ctx context.Context, fs FS, f File) (File, error)
+}
+
+type FilteredDecorator struct {
+	Decorator
+	Filter
+}
+
+// Decorate runs the decorator if the filter accepts the file.
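+// Otherwise the file is returned unchanged.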
+func (d *FilteredDecorator) Decorate(ctx context.Context, fs FS, f File) (File, error) { + if d.Accept(ctx, f) { + return d.Decorator.Decorate(ctx, fs, f) + } + return f, nil +} diff --git a/pkg/file/fingerprint.go b/pkg/file/fingerprint.go new file mode 100644 index 000000000..fab858e4e --- /dev/null +++ b/pkg/file/fingerprint.go @@ -0,0 +1,63 @@ +package file + +var ( + FingerprintTypeOshash = "oshash" + FingerprintTypeMD5 = "md5" + FingerprintTypePhash = "phash" +) + +// Fingerprint represents a fingerprint of a file. +type Fingerprint struct { + Type string + Fingerprint interface{} +} + +type Fingerprints []Fingerprint + +func (f Fingerprints) Get(type_ string) interface{} { + for _, fp := range f { + if fp.Type == type_ { + return fp.Fingerprint + } + } + + return nil +} + +func (f Fingerprints) GetString(type_ string) string { + fp := f.Get(type_) + if fp != nil { + s, _ := fp.(string) + return s + } + + return "" +} + +func (f Fingerprints) GetInt64(type_ string) int64 { + fp := f.Get(type_) + if fp != nil { + v, _ := fp.(int64) + return v + } + + return 0 +} + +// AppendUnique appends a fingerprint to the list if a Fingerprint of the same type does not already exist in the list. If one does, then it is updated with o's Fingerprint value. +func (f Fingerprints) AppendUnique(o Fingerprint) Fingerprints { + ret := f + for i, fp := range ret { + if fp.Type == o.Type { + ret[i] = o + return ret + } + } + + return append(f, o) +} + +// FingerprintCalculator calculates a fingerprint for the provided file. +type FingerprintCalculator interface { + CalculateFingerprints(f *BaseFile, o Opener) ([]Fingerprint, error) +} diff --git a/pkg/file/folder.go b/pkg/file/folder.go new file mode 100644 index 000000000..2eb4edd12 --- /dev/null +++ b/pkg/file/folder.go @@ -0,0 +1,71 @@ +package file + +import ( + "context" + "io/fs" + "strconv" + "time" +) + +// FolderID represents an ID of a folder. +type FolderID int32 + +// String converts the ID to a string. +func (i FolderID) String() string { + return strconv.Itoa(int(i)) +} + +// Folder represents a folder in the file system. +type Folder struct { + ID FolderID `json:"id"` + DirEntry + Path string `json:"path"` + ParentFolderID *FolderID `json:"parent_folder_id"` + + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +func (f *Folder) Info(fs FS) (fs.FileInfo, error) { + return f.info(fs, f.Path) +} + +// FolderGetter provides methods to find Folders. +type FolderGetter interface { + FindByPath(ctx context.Context, path string) (*Folder, error) + FindByZipFileID(ctx context.Context, zipFileID ID) ([]*Folder, error) + FindAllInPaths(ctx context.Context, p []string, limit, offset int) ([]*Folder, error) + FindByParentFolderID(ctx context.Context, parentFolderID FolderID) ([]*Folder, error) +} + +type FolderCounter interface { + CountAllInPaths(ctx context.Context, p []string) (int, error) +} + +// FolderCreator provides methods to create Folders. +type FolderCreator interface { + Create(ctx context.Context, f *Folder) error +} + +// FolderUpdater provides methods to update Folders. +type FolderUpdater interface { + Update(ctx context.Context, f *Folder) error +} + +type FolderDestroyer interface { + Destroy(ctx context.Context, id FolderID) error +} + +type FolderGetterDestroyer interface { + FolderGetter + FolderDestroyer +} + +// FolderStore provides methods to find, create and update Folders. 
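+// It combines the folder getter, counter, creator, updater and destroyer
+// interfaces defined above.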
+type FolderStore interface {
+	FolderGetter
+	FolderCounter
+	FolderCreator
+	FolderUpdater
+	FolderDestroyer
+}
diff --git a/pkg/file/fs.go b/pkg/file/fs.go
new file mode 100644
index 000000000..45d650fdf
--- /dev/null
+++ b/pkg/file/fs.go
@@ -0,0 +1,48 @@
+package file
+
+import (
+	"io"
+	"io/fs"
+	"os"
+)
+
+// Opener provides an interface to open a file.
+type Opener interface {
+	Open() (io.ReadCloser, error)
+}
+
+type fsOpener struct {
+	fs   FS
+	name string
+}
+
+func (o *fsOpener) Open() (io.ReadCloser, error) {
+	return o.fs.Open(o.name)
+}
+
+// FS represents a file system.
+type FS interface {
+	Lstat(name string) (fs.FileInfo, error)
+	Open(name string) (fs.ReadDirFile, error)
+	OpenZip(name string) (*ZipFS, error)
+}
+
+// OsFS is a file system backed by the OS.
+type OsFS struct{}
+
+func (f *OsFS) Lstat(name string) (fs.FileInfo, error) {
+	return os.Lstat(name)
+}
+
+func (f *OsFS) Open(name string) (fs.ReadDirFile, error) {
+	return os.Open(name)
+}
+
+func (f *OsFS) OpenZip(name string) (*ZipFS, error) {
+	info, err := f.Lstat(name)
+	if err != nil {
+		return nil, err
+	}
+
+	return newZipFS(f, name, info)
+}
diff --git a/pkg/file/handler.go b/pkg/file/handler.go
new file mode 100644
index 000000000..b51b2a76a
--- /dev/null
+++ b/pkg/file/handler.go
@@ -0,0 +1,53 @@
+package file
+
+import (
+	"context"
+	"io/fs"
+)
+
+// PathFilter provides a filter function for paths.
+type PathFilter interface {
+	Accept(ctx context.Context, path string, info fs.FileInfo) bool
+}
+
+type PathFilterFunc func(ctx context.Context, path string, info fs.FileInfo) bool
+
+func (pff PathFilterFunc) Accept(ctx context.Context, path string, info fs.FileInfo) bool {
+	return pff(ctx, path, info)
+}
+
+// Filter provides a filter function for Files.
+type Filter interface {
+	Accept(ctx context.Context, f File) bool
+}
+
+type FilterFunc func(ctx context.Context, f File) bool
+
+func (ff FilterFunc) Accept(ctx context.Context, f File) bool {
+	return ff(ctx, f)
+}
+
+// Handler provides a handler for Files.
+type Handler interface {
+	Handle(ctx context.Context, f File) error
+}
+
+// FilteredHandler is a Handler that runs only if the filter accepts the file.
+type FilteredHandler struct {
+	Handler
+	Filter
+}
+
+// Handle runs the handler if the filter accepts the file.
+func (h *FilteredHandler) Handle(ctx context.Context, f File) error {
+	if h.Accept(ctx, f) {
+		return h.Handler.Handle(ctx, f)
+	}
+	return nil
+}
+
+// CleanHandler provides a handler for cleaning Files and Folders.
+type CleanHandler interface {
+	HandleFile(ctx context.Context, fileDeleter *Deleter, fileID ID) error
+	HandleFolder(ctx context.Context, fileDeleter *Deleter, folderID FolderID) error
+}
diff --git a/pkg/file/hash.go b/pkg/file/hash.go
deleted file mode 100644
index 67998a265..000000000
--- a/pkg/file/hash.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package file
-
-import (
-	"io"
-
-	"github.com/stashapp/stash/pkg/hash/md5"
-	"github.com/stashapp/stash/pkg/hash/oshash"
-)
-
-type FSHasher struct{}
-
-func (h *FSHasher) OSHash(src io.ReadSeeker, size int64) (string, error) {
-	return oshash.FromReader(src, size)
-}
-
-func (h *FSHasher) MD5(src io.Reader) (string, error) {
-	return md5.FromReader(src)
-}
diff --git a/pkg/file/image/scan.go b/pkg/file/image/scan.go
new file mode 100644
index 000000000..2de4bbcea
--- /dev/null
+++ b/pkg/file/image/scan.go
@@ -0,0 +1,39 @@
+package image
+
+import (
+	"context"
+	"fmt"
+	"image"
+
+	_ "image/gif"
+	_ "image/jpeg"
+	_ "image/png"
+
+	"github.com/stashapp/stash/pkg/file"
+	_ "golang.org/x/image/webp"
+)
+
+// Decorator adds image specific fields to a File.
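+//
+// It is meant to be plugged into the scanner's decorator chain, e.g.
+// (a sketch; the remaining Scanner fields are omitted here):
+//
+//	s := &file.Scanner{
+//		FS:             &file.OsFS{},
+//		FileDecorators: []file.Decorator{&Decorator{}},
+//	}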
+type Decorator struct {
+}
+
+func (d *Decorator) Decorate(ctx context.Context, fs file.FS, f file.File) (file.File, error) {
+	base := f.Base()
+	r, err := fs.Open(base.Path)
+	if err != nil {
+		return f, fmt.Errorf("reading image file %q: %w", base.Path, err)
+	}
+	defer r.Close()
+
+	c, format, err := image.DecodeConfig(r)
+	if err != nil {
+		return f, fmt.Errorf("decoding image file %q: %w", base.Path, err)
+	}
+
+	return &file.ImageFile{
+		BaseFile: base,
+		Format:   format,
+		Width:    c.Width,
+		Height:   c.Height,
+	}, nil
+}
diff --git a/pkg/file/image_file.go b/pkg/file/image_file.go
new file mode 100644
index 000000000..4e1f5690a
--- /dev/null
+++ b/pkg/file/image_file.go
@@ -0,0 +1,9 @@
+package file
+
+// ImageFile is an extension of BaseFile to represent image files.
+type ImageFile struct {
+	*BaseFile
+	Format string `json:"format"`
+	Width  int    `json:"width"`
+	Height int    `json:"height"`
+}
diff --git a/pkg/file/scan.go b/pkg/file/scan.go
index 672fee853..f9c972bcc 100644
--- a/pkg/file/scan.go
+++ b/pkg/file/scan.go
@@ -1,190 +1,943 @@
 package file
 
 import (
+	"context"
+	"errors"
 	"fmt"
-	"io"
 	"io/fs"
-	"os"
-	"strconv"
+	"path/filepath"
+	"strings"
+	"sync"
 	"time"
 
+	"github.com/remeh/sizedwaitgroup"
 	"github.com/stashapp/stash/pkg/logger"
-	"github.com/stashapp/stash/pkg/models"
+	"github.com/stashapp/stash/pkg/txn"
 )
 
-type SourceFile interface {
-	Open() (io.ReadCloser, error)
-	Path() string
-	FileInfo() fs.FileInfo
+const (
+	scanQueueSize = 200000
+	// maximum number of times to retry in the event of a locked database
+	// use -1 to retry forever
+	maxRetries = -1
+)
+
+// Repository provides access to storage methods for files and folders.
+type Repository struct {
+	txn.Manager
+	txn.DatabaseProvider
+	Store
+
+	FolderStore FolderStore
 }
 
-type FileBased interface {
-	File() models.File
+// Scanner scans files into the database.
+//
+// The scan process works using two goroutines. The first walks through the provided paths
+// in the filesystem. It runs each directory entry through the provided ScanFilters. If none
+// of the filter Accept methods return true, then the file/directory is ignored.
+// Any folders found are handled immediately. Files inside zip files are also handled immediately.
+// All other files encountered are sent to the second goroutine's queue.
+//
+// Folders are handled by checking if the folder exists in the database, by its full path.
+// If a folder entry already exists, then its mod time is updated (if applicable).
+// If the folder does not exist in the database, then a new folder entry is created.
+//
+// Files are handled by first querying for the file by its path. If the file entry exists in the
+// database, then the mod time is compared to the value in the database. If the mod time is different,
+// then the file is marked as updated - it recalculates any fingerprints and fires decorators, then
+// the file entry is updated and any applicable handlers are fired.
+//
+// If the file entry does not exist in the database, then fingerprints are calculated for the file.
+// It then determines if the file is a rename of an existing file by querying for file entries with
+// the same fingerprint. If any are found, it checks each to see if any are missing in the file
+// system. If one is, then the file is treated as renamed and its path is updated. If none are missing,
+// or many are, then the file is treated as a new file.
+// +// If the file is not a renamed file, then the decorators are fired and the file is created, then +// the applicable handlers are fired. +type Scanner struct { + FS FS + Repository Repository + FingerprintCalculator FingerprintCalculator + + // FileDecorators are applied to files as they are scanned. + FileDecorators []Decorator } -type Hasher interface { - OSHash(src io.ReadSeeker, size int64) (string, error) - MD5(src io.Reader) (string, error) +// ProgressReporter is used to report progress of the scan. +type ProgressReporter interface { + AddTotal(total int) + Increment() + Definite() + ExecuteTask(description string, fn func()) } -type Scanned struct { - Old *models.File - New *models.File +type scanJob struct { + *Scanner + + // handlers are called after a file has been scanned. + handlers []Handler + + ProgressReports ProgressReporter + options ScanOptions + + startTime time.Time + fileQueue chan scanFile + dbQueue chan func(ctx context.Context) error + retryList []scanFile + retrying bool + folderPathToID sync.Map + zipPathToID sync.Map + count int + + txnRetryer txn.Retryer } -// FileUpdated returns true if both old and new files are present and not equal. -func (s Scanned) FileUpdated() bool { - if s.Old == nil || s.New == nil { - return false +// ScanOptions provides options for scanning files. +type ScanOptions struct { + Paths []string + + // ZipFileExtensions is a list of file extensions that are considered zip files. + // Extension does not include the . character. + ZipFileExtensions []string + + // ScanFilters are used to determine if a file should be scanned. + ScanFilters []PathFilter + + // HandlerRequiredFilters are used to determine if an unchanged file needs to be handled + HandlerRequiredFilters []Filter + + ParallelTasks int +} + +// Scan starts the scanning process. +func (s *Scanner) Scan(ctx context.Context, handlers []Handler, options ScanOptions, progressReporter ProgressReporter) { + job := &scanJob{ + Scanner: s, + handlers: handlers, + ProgressReports: progressReporter, + options: options, + txnRetryer: txn.Retryer{ + Manager: s.Repository, + Retries: maxRetries, + }, } - return !s.Old.Equal(*s.New) + job.execute(ctx) } -// ContentsChanged returns true if both old and new files are present and the file content is different. -func (s Scanned) ContentsChanged() bool { - if s.Old == nil || s.New == nil { - return false +type scanFile struct { + *BaseFile + fs FS + info fs.FileInfo + zipFile *scanFile +} + +func (s *scanJob) withTxn(ctx context.Context, fn func(ctx context.Context) error) error { + return s.txnRetryer.WithTxn(ctx, fn) +} + +func (s *scanJob) withDB(ctx context.Context, fn func(ctx context.Context) error) error { + return txn.WithDatabase(ctx, s.Repository, fn) +} + +func (s *scanJob) execute(ctx context.Context) { + paths := s.options.Paths + logger.Infof("scanning %d paths", len(paths)) + s.startTime = time.Now() + + s.fileQueue = make(chan scanFile, scanQueueSize) + s.dbQueue = make(chan func(ctx context.Context) error, scanQueueSize) + + go func() { + if err := s.queueFiles(ctx, paths); err != nil { + if errors.Is(err, context.Canceled) { + return + } + + logger.Errorf("error queuing files for scan: %v", err) + return + } + + logger.Infof("Finished adding files to queue. 
%d files queued", s.count) + }() + + if err := s.processQueue(ctx); err != nil { + if errors.Is(err, context.Canceled) { + return + } + + logger.Errorf("error scanning files: %v", err) + return + } +} + +func (s *scanJob) queueFiles(ctx context.Context, paths []string) error { + var err error + s.ProgressReports.ExecuteTask("Walking directory tree", func() { + for _, p := range paths { + err = symWalk(s.FS, p, s.queueFileFunc(ctx, s.FS, nil)) + if err != nil { + return + } + } + }) + + close(s.fileQueue) + + if s.ProgressReports != nil { + s.ProgressReports.AddTotal(s.count) + s.ProgressReports.Definite() } - if s.Old.Checksum != s.New.Checksum { - return true + return err +} + +func (s *scanJob) queueFileFunc(ctx context.Context, f FS, zipFile *scanFile) fs.WalkDirFunc { + return func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + + if err = ctx.Err(); err != nil { + return err + } + + info, err := d.Info() + if err != nil { + return fmt.Errorf("reading info for %q: %w", path, err) + } + + if !s.acceptEntry(ctx, path, info) { + if info.IsDir() { + return fs.SkipDir + } + + return nil + } + + ff := scanFile{ + BaseFile: &BaseFile{ + DirEntry: DirEntry{ + ModTime: modTime(info), + }, + Path: path, + Basename: filepath.Base(path), + Size: info.Size(), + }, + fs: f, + info: info, + // there is no guarantee that the zip file has been scanned + // so we can't just plug in the id. + zipFile: zipFile, + } + + if info.IsDir() { + // handle folders immediately + if err := s.handleFolder(ctx, ff); err != nil { + if !errors.Is(err, context.Canceled) { + logger.Errorf("error processing %q: %v", path, err) + } + + // skip the directory since we won't be able to process the files anyway + return fs.SkipDir + } + + return nil + } + + // if zip file is present, we handle immediately + if zipFile != nil { + s.ProgressReports.ExecuteTask("Scanning "+path, func() { + if err := s.handleFile(ctx, ff); err != nil { + if !errors.Is(err, context.Canceled) { + logger.Errorf("error processing %q: %v", path, err) + } + // don't return an error, just skip the file + } + }) + + return nil + } + + s.fileQueue <- ff + + s.count++ + + return nil + } +} + +func (s *scanJob) acceptEntry(ctx context.Context, path string, info fs.FileInfo) bool { + // always accept if there's no filters + accept := len(s.options.ScanFilters) == 0 + for _, filter := range s.options.ScanFilters { + // accept if any filter accepts the file + if filter.Accept(ctx, path, info) { + accept = true + break + } } - if s.Old.OSHash != s.New.OSHash { - return true + return accept +} + +func (s *scanJob) scanZipFile(ctx context.Context, f scanFile) error { + zipFS, err := f.fs.OpenZip(f.Path) + if err != nil { + if errors.Is(err, errNotReaderAt) { + // can't walk the zip file + // just return + return nil + } + + return err + } + + defer zipFS.Close() + + return symWalk(zipFS, f.Path, s.queueFileFunc(ctx, zipFS, &f)) +} + +func (s *scanJob) processQueue(ctx context.Context) error { + parallelTasks := s.options.ParallelTasks + if parallelTasks < 1 { + parallelTasks = 1 + } + + wg := sizedwaitgroup.New(parallelTasks) + + for f := range s.fileQueue { + if err := ctx.Err(); err != nil { + return err + } + + wg.Add() + ff := f + go func() { + defer wg.Done() + s.processQueueItem(ctx, ff) + }() + } + + wg.Wait() + s.retrying = true + for _, f := range s.retryList { + if err := ctx.Err(); err != nil { + return err + } + + wg.Add() + ff := f + go func() { + defer wg.Done() + s.processQueueItem(ctx, ff) + }() + } + + wg.Wait() 
+
+	close(s.dbQueue)
+
+	return nil
+}
+
+func (s *scanJob) incrementProgress(f scanFile) {
+	// don't increment for files inside zip files since these aren't
+	// counted during the initial walking
+	if s.ProgressReports != nil && f.zipFile == nil {
+		s.ProgressReports.Increment()
+	}
+}
+
+func (s *scanJob) processQueueItem(ctx context.Context, f scanFile) {
+	s.ProgressReports.ExecuteTask("Scanning "+f.Path, func() {
+		var err error
+		if f.info.IsDir() {
+			err = s.handleFolder(ctx, f)
+		} else {
+			err = s.handleFile(ctx, f)
+		}
+
+		if err != nil && !errors.Is(err, context.Canceled) {
+			logger.Errorf("error processing %q: %v", f.Path, err)
+		}
+	})
+}
+
+func (s *scanJob) getFolderID(ctx context.Context, path string) (*FolderID, error) {
+	// check the folder cache first
+	if f, ok := s.folderPathToID.Load(path); ok {
+		v := f.(FolderID)
+		return &v, nil
+	}
+
+	ret, err := s.Repository.FolderStore.FindByPath(ctx, path)
+	if err != nil {
+		return nil, err
+	}
+
+	if ret == nil {
+		return nil, nil
+	}
+
+	s.folderPathToID.Store(path, ret.ID)
+	return &ret.ID, nil
+}
+
+func (s *scanJob) getZipFileID(ctx context.Context, zipFile *scanFile) (*ID, error) {
+	if zipFile == nil {
+		return nil, nil
+	}
+
+	if zipFile.ID != 0 {
+		return &zipFile.ID, nil
+	}
+
+	path := zipFile.Path
+
+	// check the zip path cache first
+	if f, ok := s.zipPathToID.Load(path); ok {
+		v := f.(ID)
+		return &v, nil
+	}
+
+	ret, err := s.Repository.FindByPath(ctx, path)
+	if err != nil {
+		return nil, fmt.Errorf("getting zip file ID for %q: %w", path, err)
+	}
+
+	if ret == nil {
+		return nil, fmt.Errorf("zip file %q doesn't exist in database", zipFile.Path)
+	}
+
+	s.zipPathToID.Store(path, ret.Base().ID)
+	return &ret.Base().ID, nil
+}
+
+func (s *scanJob) handleFolder(ctx context.Context, file scanFile) error {
+	path := file.Path
+
+	return s.withTxn(ctx, func(ctx context.Context) error {
+		defer s.incrementProgress(file)
+
+		// determine if folder already exists in data store (by path)
+		f, err := s.Repository.FolderStore.FindByPath(ctx, path)
+		if err != nil {
+			return fmt.Errorf("checking for existing folder %q: %w", path, err)
+		}
+
+		// if the folder doesn't exist, create it
+		if f == nil {
+			f, err = s.onNewFolder(ctx, file)
+		} else {
+			f, err = s.onExistingFolder(ctx, file, f)
+		}
+
+		if err != nil {
+			return err
+		}
+
+		if f != nil {
+			s.folderPathToID.Store(f.Path, f.ID)
+		}
+
+		return nil
+	})
+}
+
+func (s *scanJob) onNewFolder(ctx context.Context, file scanFile) (*Folder, error) {
+	now := time.Now()
+
+	toCreate := &Folder{
+		DirEntry: DirEntry{
+			ModTime: file.ModTime,
+		},
+		Path:      file.Path,
+		CreatedAt: now,
+		UpdatedAt: now,
+	}
+
+	zipFileID, err := s.getZipFileID(ctx, file.zipFile)
+	if err != nil {
+		return nil, err
+	}
+
+	if zipFileID != nil {
+		toCreate.ZipFileID = zipFileID
+	}
+
+	dir := filepath.Dir(file.Path)
+	if dir != "." {
+		parentFolderID, err := s.getFolderID(ctx, dir)
+		if err != nil {
+			return nil, fmt.Errorf("getting parent folder %q: %w", dir, err)
+		}
+
+		// if the parent folder doesn't exist, assume it's a top-level folder
+		// this may not be true if we're using multiple goroutines
+		if parentFolderID != nil {
+			toCreate.ParentFolderID = parentFolderID
+		}
+	}
+
+	logger.Infof("%s doesn't exist.
Creating new folder entry...", file.Path) + if err := s.Repository.FolderStore.Create(ctx, toCreate); err != nil { + return nil, fmt.Errorf("creating folder %q: %w", file.Path, err) + } + + return toCreate, nil +} + +func (s *scanJob) onExistingFolder(ctx context.Context, f scanFile, existing *Folder) (*Folder, error) { + // check if the mod time is changed + entryModTime := f.ModTime + + if !entryModTime.Equal(existing.ModTime) { + // update entry in store + existing.ModTime = entryModTime + + var err error + if err = s.Repository.FolderStore.Update(ctx, existing); err != nil { + return nil, fmt.Errorf("updating folder %q: %w", f.Path, err) + } + } + + return existing, nil +} + +func modTime(info fs.FileInfo) time.Time { + // truncate to seconds, since we don't store beyond that in the database + return info.ModTime().Truncate(time.Second) +} + +func (s *scanJob) handleFile(ctx context.Context, f scanFile) error { + var ff File + // don't use a transaction to check if new or existing + if err := s.withDB(ctx, func(ctx context.Context) error { + // determine if file already exists in data store + var err error + ff, err = s.Repository.FindByPath(ctx, f.Path) + if err != nil { + return fmt.Errorf("checking for existing file %q: %w", f.Path, err) + } + + if ff == nil { + ff, err = s.onNewFile(ctx, f) + return err + } + + ff, err = s.onExistingFile(ctx, f, ff) + return err + }); err != nil { + return err + } + + if ff != nil && s.isZipFile(f.info.Name()) { + f.BaseFile = ff.Base() + + // scan zip files with a different context that is not cancellable + // cancelling while scanning zip file contents results in the scan + // contents being partially completed + zipCtx := context.Background() + + if err := s.scanZipFile(zipCtx, f); err != nil { + logger.Errorf("Error scanning zip file %q: %v", f.Path, err) + } + } + + return nil +} + +func (s *scanJob) isZipFile(path string) bool { + fExt := filepath.Ext(path) + for _, ext := range s.options.ZipFileExtensions { + if strings.EqualFold(fExt, "."+ext) { + return true + } } return false } -type Scanner struct { - Hasher Hasher +func (s *scanJob) onNewFile(ctx context.Context, f scanFile) (File, error) { + now := time.Now() - CalculateMD5 bool - CalculateOSHash bool -} + baseFile := f.BaseFile + path := baseFile.Path -func (o Scanner) ScanExisting(existing FileBased, file SourceFile) (h *Scanned, err error) { - info := file.FileInfo() - h = &Scanned{} + baseFile.CreatedAt = now + baseFile.UpdatedAt = now - existingFile := existing.File() - h.Old = &existingFile + // find the parent folder + parentFolderID, err := s.getFolderID(ctx, filepath.Dir(path)) + if err != nil { + return nil, fmt.Errorf("getting parent folder for %q: %w", path, err) + } - updatedFile := existingFile - h.New = &updatedFile + if parentFolderID == nil { + // if parent folder doesn't exist, assume it's not yet created + // add this file to the queue to be created later + if s.retrying { + // if we're retrying and the folder still doesn't exist, then it's a problem + s.incrementProgress(f) + return nil, fmt.Errorf("parent folder for %q doesn't exist", path) + } - // update existing data if needed - // truncate to seconds, since we don't store beyond that in the database - updatedFile.FileModTime = info.ModTime().Truncate(time.Second) - updatedFile.Size = strconv.FormatInt(info.Size(), 10) + s.retryList = append(s.retryList, f) + return nil, nil + } - modTimeChanged := !existingFile.FileModTime.Equal(updatedFile.FileModTime) + baseFile.ParentFolderID = *parentFolderID - // 
regenerate hash(es) if missing or file mod time changed - if _, err = o.generateHashes(&updatedFile, file, modTimeChanged); err != nil { + zipFileID, err := s.getZipFileID(ctx, f.zipFile) + if err != nil { + s.incrementProgress(f) return nil, err } - // notify of changes as needed - // object exists, no further processing required - return -} - -func (o Scanner) ScanNew(file SourceFile) (*models.File, error) { - info := file.FileInfo() - sizeStr := strconv.FormatInt(info.Size(), 10) - modTime := info.ModTime() - f := models.File{ - Path: file.Path(), - Size: sizeStr, - FileModTime: modTime, + if zipFileID != nil { + baseFile.ZipFileID = zipFileID } - if _, err := o.generateHashes(&f, file, true); err != nil { + fp, err := s.calculateFingerprints(f.fs, baseFile, path) + if err != nil { + s.incrementProgress(f) return nil, err } - return &f, nil + baseFile.SetFingerprints(fp) + + file, err := s.fireDecorators(ctx, f.fs, baseFile) + if err != nil { + s.incrementProgress(f) + return nil, err + } + + // determine if the file is renamed from an existing file in the store + // do this after decoration so that missing fields can be populated + renamed, err := s.handleRename(ctx, file, fp) + if err != nil { + s.incrementProgress(f) + return nil, err + } + + if renamed != nil { + return renamed, nil + } + + // if not renamed, queue file for creation + if err := s.withTxn(ctx, func(ctx context.Context) error { + if err := s.Repository.Create(ctx, file); err != nil { + return fmt.Errorf("creating file %q: %w", path, err) + } + + if err := s.fireHandlers(ctx, file); err != nil { + return err + } + + return nil + }); err != nil { + return nil, err + } + + return file, nil } -// generateHashes regenerates and sets the hashes in the provided File. -// It will not recalculate unless specified. -func (o Scanner) generateHashes(f *models.File, file SourceFile, regenerate bool) (changed bool, err error) { - existing := *f - - var src io.ReadCloser - if o.CalculateOSHash && (regenerate || f.OSHash == "") { - logger.Infof("Calculating oshash for %s ...", f.Path) - - size := file.FileInfo().Size() - - // #2196 for symlinks - // get the size of the actual file, not the symlink - if file.FileInfo().Mode()&os.ModeSymlink == os.ModeSymlink { - fi, err := os.Stat(f.Path) - if err != nil { - return false, err - } - logger.Debugf("File <%s> is symlink. 
Size changed from <%d> to <%d>", f.Path, size, fi.Size()) - size = fi.Size() - } - - src, err = file.Open() +func (s *scanJob) fireDecorators(ctx context.Context, fs FS, f File) (File, error) { + for _, h := range s.FileDecorators { + var err error + f, err = h.Decorate(ctx, fs, f) if err != nil { - return false, err - } - defer src.Close() - - seekSrc, valid := src.(io.ReadSeeker) - if !valid { - return false, fmt.Errorf("invalid source file type: %s", file.Path()) - } - - // regenerate hash - var oshash string - oshash, err = o.Hasher.OSHash(seekSrc, size) - if err != nil { - return false, fmt.Errorf("error generating oshash for %s: %w", file.Path(), err) - } - - f.OSHash = oshash - - // reset reader to start of file - _, err = seekSrc.Seek(0, io.SeekStart) - if err != nil { - return false, fmt.Errorf("error seeking to start of file in %s: %w", file.Path(), err) + return f, err } } - // always generate if MD5 is nil - // only regenerate MD5 if: - // - OSHash was not calculated, or - // - existing OSHash is different to generated one - // or if it was different to the previous version - if o.CalculateMD5 && (f.Checksum == "" || (regenerate && (!o.CalculateOSHash || existing.OSHash != f.OSHash))) { - logger.Infof("Calculating checksum for %s...", f.Path) + return f, nil +} - if src == nil { - src, err = file.Open() - if err != nil { - return false, err - } - defer src.Close() +func (s *scanJob) fireHandlers(ctx context.Context, f File) error { + for _, h := range s.handlers { + if err := h.Handle(ctx, f); err != nil { + return err } - - // regenerate checksum - var checksum string - checksum, err = o.Hasher.MD5(src) - if err != nil { - return - } - - f.Checksum = checksum } - changed = (o.CalculateOSHash && (f.OSHash != existing.OSHash)) || (o.CalculateMD5 && (f.Checksum != existing.Checksum)) - - return + return nil +} + +func (s *scanJob) calculateFingerprints(fs FS, f *BaseFile, path string) ([]Fingerprint, error) { + logger.Infof("Calculating fingerprints for %s ...", path) + + // calculate primary fingerprint for the file + fp, err := s.FingerprintCalculator.CalculateFingerprints(f, &fsOpener{ + fs: fs, + name: path, + }) + if err != nil { + return nil, fmt.Errorf("calculating fingerprint for file %q: %w", path, err) + } + + return fp, nil +} + +func appendFileUnique(v []File, toAdd []File) []File { + for _, f := range toAdd { + found := false + id := f.Base().ID + for _, vv := range v { + if vv.Base().ID == id { + found = true + break + } + } + + if !found { + v = append(v, f) + } + } + + return v +} + +func (s *scanJob) getFileFS(f *BaseFile) (FS, error) { + if f.ZipFile == nil { + return s.FS, nil + } + + fs, err := s.getFileFS(f.ZipFile.Base()) + if err != nil { + return nil, err + } + + zipPath := f.ZipFile.Base().Path + return fs.OpenZip(zipPath) +} + +func (s *scanJob) handleRename(ctx context.Context, f File, fp []Fingerprint) (File, error) { + var others []File + + for _, tfp := range fp { + thisOthers, err := s.Repository.FindByFingerprint(ctx, tfp) + if err != nil { + return nil, fmt.Errorf("getting files by fingerprint %v: %w", tfp, err) + } + + others = appendFileUnique(others, thisOthers) + } + + var missing []File + + for _, other := range others { + // if file does not exist, then update it to the new path + // TODO - handle #1426 scenario + fs, err := s.getFileFS(other.Base()) + if err != nil { + return nil, fmt.Errorf("getting FS for %q: %w", other.Base().Path, err) + } + + if _, err := fs.Lstat(other.Base().Path); err != nil { + missing = append(missing, other) + } 
+	}
+
+	n := len(missing)
+	if n == 0 {
+		// no missing files, not a rename
+		return nil, nil
+	}
+
+	// assume does not exist, update existing file
+	// it's possible that there may be multiple missing files.
+	// just use the first one to rename.
+	other := missing[0]
+	otherBase := other.Base()
+
+	fBase := f.Base()
+
+	logger.Infof("%s moved to %s. Updating path...", otherBase.Path, fBase.Path)
+	fBase.ID = otherBase.ID
+	fBase.CreatedAt = otherBase.CreatedAt
+	fBase.Fingerprints = otherBase.Fingerprints
+
+	if err := s.withTxn(ctx, func(ctx context.Context) error {
+		if err := s.Repository.Update(ctx, f); err != nil {
+			return fmt.Errorf("updating file for rename %q: %w", fBase.Path, err)
+		}
+
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	return f, nil
+}
+
+func (s *scanJob) isHandlerRequired(ctx context.Context, f File) bool {
+	accept := len(s.options.HandlerRequiredFilters) == 0
+	for _, filter := range s.options.HandlerRequiredFilters {
+		// accept if any filter accepts the file
+		if filter.Accept(ctx, f) {
+			accept = true
+			break
+		}
+	}
+
+	return accept
+}
+
+// isMissingMetadata returns true if the provided file is missing metadata.
+// Missing metadata should only occur after schema migration 32.
+// Looks for special values. For numbers, this will be -1. For strings, this
+// will be 'unset'.
+// Missing metadata includes the following:
+// - file size
+// - image format, width or height
+// - video codec, audio codec, format, width, height, framerate or bitrate
+func (s *scanJob) isMissingMetadata(existing File) bool {
+	const (
+		unsetString = "unset"
+		unsetNumber = -1
+	)
+
+	if existing.Base().Size == unsetNumber {
+		return true
+	}
+
+	switch f := existing.(type) {
+	case *ImageFile:
+		return f.Format == unsetString || f.Width == unsetNumber || f.Height == unsetNumber
+	case *VideoFile:
+		return f.VideoCodec == unsetString || f.AudioCodec == unsetString ||
+			f.Format == unsetString || f.Width == unsetNumber ||
+			f.Height == unsetNumber || f.FrameRate == unsetNumber ||
+			f.BitRate == unsetNumber
+	}
+
+	return false
+}
+
+func (s *scanJob) setMissingMetadata(ctx context.Context, f scanFile, existing File) (File, error) {
+	path := existing.Base().Path
+	logger.Infof("Setting missing metadata for %s", path)
+
+	existing.Base().Size = f.Size
+
+	var err error
+	existing, err = s.fireDecorators(ctx, f.fs, existing)
+	if err != nil {
+		return nil, err
+	}
+
+	// queue file for update
+	if err := s.withTxn(ctx, func(ctx context.Context) error {
+		if err := s.Repository.Update(ctx, existing); err != nil {
+			return fmt.Errorf("updating file %q: %w", path, err)
+		}
+
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	return existing, nil
+}
+
+// returns a file only if it was updated
+func (s *scanJob) onExistingFile(ctx context.Context, f scanFile, existing File) (File, error) {
+	base := existing.Base()
+	path := base.Path
+
+	fileModTime := f.ModTime
+	updated := !fileModTime.Equal(base.ModTime)
+
+	if !updated {
+		isMissingMetadata := s.isMissingMetadata(existing)
+		// set missing information
+		if isMissingMetadata {
+			var err error
+			existing, err = s.setMissingMetadata(ctx, f, existing)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		handlerRequired := false
+		if err := s.withDB(ctx, func(ctx context.Context) error {
+			// check if the handler needs to be run
+			handlerRequired = s.isHandlerRequired(ctx, existing)
+			return nil
+		}); err != nil {
+			return nil, err
+		}
+
+		if !handlerRequired {
+			s.incrementProgress(f)
+
+			// if this file is a zip file, then we need to rescan the contents
+			// as well. We do this by returning the file, instead of nil.
+			if isMissingMetadata {
+				return existing, nil
+			}
+
+			return nil, nil
+		}
+
+		if err := s.withTxn(ctx, func(ctx context.Context) error {
+			if err := s.fireHandlers(ctx, existing); err != nil {
+				return err
+			}
+
+			s.incrementProgress(f)
+			return nil
+		}); err != nil {
+			return nil, err
+		}
+
+		// if this file is a zip file, then we need to rescan the contents
+		// as well. We do this by returning the file, instead of nil.
+		if isMissingMetadata {
+			return existing, nil
+		}
+
+		return nil, nil
+	}
+
+	logger.Infof("%s has been updated: rescanning", path)
+	base.ModTime = fileModTime
+	base.Size = f.Size
+	base.UpdatedAt = time.Now()
+
+	// calculate and update fingerprints for the file
+	fp, err := s.calculateFingerprints(f.fs, base, path)
+	if err != nil {
+		s.incrementProgress(f)
+		return nil, err
+	}
+
+	existing.SetFingerprints(fp)
+
+	existing, err = s.fireDecorators(ctx, f.fs, existing)
+	if err != nil {
+		s.incrementProgress(f)
+		return nil, err
+	}
+
+	// queue file for update
+	if err := s.withTxn(ctx, func(ctx context.Context) error {
+		if err := s.Repository.Update(ctx, existing); err != nil {
+			return fmt.Errorf("updating file %q: %w", path, err)
+		}
+
+		if err := s.fireHandlers(ctx, existing); err != nil {
+			return err
+		}
+
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	return existing, nil
+}
diff --git a/pkg/scene/caption.go b/pkg/file/video/caption.go
similarity index 56%
rename from pkg/scene/caption.go
rename to pkg/file/video/caption.go
index f45ba8a2d..8c10d0d1c 100644
--- a/pkg/scene/caption.go
+++ b/pkg/file/video/caption.go
@@ -1,14 +1,18 @@
-package scene
+package video
 
 import (
+	"context"
+	"fmt"
 	"os"
 	"path/filepath"
 	"strings"
 
-	"golang.org/x/text/language"
-
 	"github.com/asticode/go-astisub"
+	"github.com/stashapp/stash/pkg/file"
+	"github.com/stashapp/stash/pkg/logger"
 	"github.com/stashapp/stash/pkg/models"
+	"github.com/stashapp/stash/pkg/txn"
+	"golang.org/x/text/language"
 )
 
 var CaptionExts = []string{"vtt", "srt"} // in a case where vtt and srt files are both provided prioritize vtt file due to native support
@@ -46,7 +50,7 @@ func IsValidLanguage(lang string) bool {
 
 // IsLangInCaptions returns true if lang is present
 // in the captions
-func IsLangInCaptions(lang string, ext string, captions []*models.SceneCaption) bool {
+func IsLangInCaptions(lang string, ext string, captions []*models.VideoCaption) bool {
 	for _, caption := range captions {
 		if lang == caption.LanguageCode && ext == caption.CaptionType {
 			return true
@@ -55,41 +59,8 @@ func IsLangInCaptions(lang string, ext string, captions []*models.SceneCaption)
 	return false
 }
 
-// GenerateCaptionCandidates generates a list of filenames with exts as extensions
-// that can associated with the caption
-func GenerateCaptionCandidates(captionPath string, exts []string) []string {
-	var candidates []string
-
-	basename := strings.TrimSuffix(captionPath, filepath.Ext(captionPath)) // caption filename without the extension
-
-	// a caption file can be something like scene_filename.srt or scene_filename.en.srt
-	// if a language code is present and valid remove it from the basename
-	languageExt := filepath.Ext(basename)
-	if len(languageExt) > 2 && IsValidLanguage(languageExt[1:]) {
-		basename = strings.TrimSuffix(basename, languageExt)
-	}
-
-	for _, ext := range exts {
-		candidates = append(candidates, basename+"."+ext)
-	}
-
-	return candidates
-}
-
-// GetCaptionsLangFromPath returns the language code from a given captions path
-// If no valid language is present LangUknown is returned
-func GetCaptionsLangFromPath(captionPath string) string {
-	langCode := LangUnknown
-	basename := strings.TrimSuffix(captionPath, filepath.Ext(captionPath)) // caption filename without the extension
-	languageExt := filepath.Ext(basename)
-	if len(languageExt) > 2 && IsValidLanguage(languageExt[1:]) {
-		langCode = languageExt[1:]
-	}
-	return langCode
-}
-
 // CleanCaptions removes non existent/accessible language codes from captions
-func CleanCaptions(scenePath string, captions []*models.SceneCaption) (cleanedCaptions []*models.SceneCaption, changed bool) {
+func CleanCaptions(scenePath string, captions []*models.VideoCaption) (cleanedCaptions []*models.VideoCaption, changed bool) {
 	changed = false
 	for _, caption := range captions {
 		found := false
@@ -104,3 +75,76 @@ func CleanCaptions(scenePath string, captions []*models.VideoCaption) (cleanedCa
 	}
 	return
 }
+
+// getCaptionPrefix returns the prefix used to search for video files for the provided caption path
+func getCaptionPrefix(captionPath string) string {
+	basename := strings.TrimSuffix(captionPath, filepath.Ext(captionPath)) // caption filename without the extension
+
+	// a caption file can be something like scene_filename.srt or scene_filename.en.srt
+	// if a language code is present and valid remove it from the basename
+	languageExt := filepath.Ext(basename)
+	if len(languageExt) > 2 && IsValidLanguage(languageExt[1:]) {
+		basename = strings.TrimSuffix(basename, languageExt)
+	}
+
+	return basename + "."
+}
+
+// getCaptionsLangFromPath returns the language code from a given captions path.
+// If no valid language is present, LangUnknown is returned.
+func getCaptionsLangFromPath(captionPath string) string {
+	langCode := LangUnknown
+	basename := strings.TrimSuffix(captionPath, filepath.Ext(captionPath)) // caption filename without the extension
+	languageExt := filepath.Ext(basename)
+	if len(languageExt) > 2 && IsValidLanguage(languageExt[1:]) {
+		langCode = languageExt[1:]
+	}
+	return langCode
+}
+
+type CaptionUpdater interface {
+	GetCaptions(ctx context.Context, fileID file.ID) ([]*models.VideoCaption, error)
+	UpdateCaptions(ctx context.Context, fileID file.ID, captions []*models.VideoCaption) error
+}
+
+// AssociateCaptions associates captions to scene(s) with the same basename.
+func AssociateCaptions(ctx context.Context, captionPath string, txnMgr txn.Manager, fqb file.Getter, w CaptionUpdater) {
+	captionLang := getCaptionsLangFromPath(captionPath)
+
+	captionPrefix := getCaptionPrefix(captionPath)
+	if err := txn.WithTxn(ctx, txnMgr, func(ctx context.Context) error {
+		var err error
+		f, er := fqb.FindByPath(ctx, captionPrefix+"*")
+
+		if er != nil {
+			return fmt.Errorf("searching for scene %s: %w", captionPrefix, er)
+		}
+
+		if f != nil { // found related scene
+			fileID := f.Base().ID
+			path := f.Base().Path
+
+			logger.Debugf("Matched captions to file %s", path)
+			captions, er := w.GetCaptions(ctx, fileID)
+			if er == nil {
+				fileExt := filepath.Ext(captionPath)
+				ext := fileExt[1:]
+				if !IsLangInCaptions(captionLang, ext, captions) { // only update captions if language code is not present
+					newCaption := &models.VideoCaption{
+						LanguageCode: captionLang,
+						Filename:     filepath.Base(captionPath),
+						CaptionType:  ext,
+					}
+					captions = append(captions, newCaption)
+					er = w.UpdateCaptions(ctx, fileID, captions)
+					if er == nil {
+						logger.Debugf("Updated captions for file %s.
Added %s", path, captionLang) + } + } + } + } + return err + }); err != nil { + logger.Error(err.Error()) + } +} diff --git a/pkg/file/video/caption_test.go b/pkg/file/video/caption_test.go new file mode 100644 index 000000000..7c6f301da --- /dev/null +++ b/pkg/file/video/caption_test.go @@ -0,0 +1,53 @@ +package video + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type testCase struct { + captionPath string + expectedLang string + expectedResult string +} + +var testCases = []testCase{ + { + captionPath: "/stash/video.vtt", + expectedLang: LangUnknown, + expectedResult: "/stash/video.", + }, + { + captionPath: "/stash/video.en.vtt", + expectedLang: "en", + expectedResult: "/stash/video.", // lang code valid, remove en part + }, + { + captionPath: "/stash/video.test.srt", + expectedLang: LangUnknown, + expectedResult: "/stash/video.test.", // no lang code/lang code invalid test should remain + }, + { + captionPath: "C:\\videos\\video.fr.srt", + expectedLang: "fr", + expectedResult: "C:\\videos\\video.", + }, + { + captionPath: "C:\\videos\\video.xx.srt", + expectedLang: LangUnknown, + expectedResult: "C:\\videos\\video.xx.", // no lang code/lang code invalid xx should remain + }, +} + +func TestGenerateCaptionCandidates(t *testing.T) { + for _, c := range testCases { + assert.Equal(t, c.expectedResult, getCaptionPrefix(c.captionPath)) + } +} + +func TestGetCaptionsLangFromPath(t *testing.T) { + for _, l := range testCases { + assert.Equal(t, l.expectedLang, getCaptionsLangFromPath(l.captionPath)) + } +} diff --git a/pkg/scene/funscript.go b/pkg/file/video/funscript.go similarity index 95% rename from pkg/scene/funscript.go rename to pkg/file/video/funscript.go index 8a28d3e77..073057cf6 100644 --- a/pkg/scene/funscript.go +++ b/pkg/file/video/funscript.go @@ -1,4 +1,4 @@ -package scene +package video import ( "path/filepath" diff --git a/pkg/file/video/scan.go b/pkg/file/video/scan.go new file mode 100644 index 000000000..4faea85aa --- /dev/null +++ b/pkg/file/video/scan.go @@ -0,0 +1,57 @@ +package video + +import ( + "context" + "errors" + "fmt" + + "github.com/stashapp/stash/pkg/ffmpeg" + "github.com/stashapp/stash/pkg/file" +) + +// Decorator adds video specific fields to a File. 
+type Decorator struct { + FFProbe ffmpeg.FFProbe +} + +func (d *Decorator) Decorate(ctx context.Context, fs file.FS, f file.File) (file.File, error) { + if d.FFProbe == "" { + return f, errors.New("ffprobe not configured") + } + + base := f.Base() + // TODO - copy to temp file if not an OsFS + if _, isOs := fs.(*file.OsFS); !isOs { + return f, fmt.Errorf("video.constructFile: only OsFS is supported") + } + + probe := d.FFProbe + videoFile, err := probe.NewVideoFile(base.Path) + if err != nil { + return f, fmt.Errorf("running ffprobe on %q: %w", base.Path, err) + } + + container, err := ffmpeg.MatchContainer(videoFile.Container, base.Path) + if err != nil { + return f, fmt.Errorf("matching container for %q: %w", base.Path, err) + } + + // check if there is a funscript file + interactive := false + if _, err := fs.Lstat(GetFunscriptPath(base.Path)); err == nil { + interactive = true + } + + return &file.VideoFile{ + BaseFile: base, + Format: string(container), + VideoCodec: videoFile.VideoCodec, + AudioCodec: videoFile.AudioCodec, + Width: videoFile.Width, + Height: videoFile.Height, + Duration: videoFile.Duration, + FrameRate: videoFile.FrameRate, + BitRate: videoFile.Bitrate, + Interactive: interactive, + }, nil +} diff --git a/pkg/file/video_file.go b/pkg/file/video_file.go new file mode 100644 index 000000000..ec08aad87 --- /dev/null +++ b/pkg/file/video_file.go @@ -0,0 +1,28 @@ +package file + +// VideoFile is an extension of BaseFile to represent video files. +type VideoFile struct { + *BaseFile + Format string `json:"format"` + Width int `json:"width"` + Height int `json:"height"` + Duration float64 `json:"duration"` + VideoCodec string `json:"video_codec"` + AudioCodec string `json:"audio_codec"` + FrameRate float64 `json:"frame_rate"` + BitRate int64 `json:"bitrate"` + + Interactive bool `json:"interactive"` + InteractiveSpeed *int `json:"interactive_speed"` +} + +func (f VideoFile) GetMinResolution() int { + w := f.Width + h := f.Height + + if w < h { + return w + } + + return h +} diff --git a/pkg/file/walk.go b/pkg/file/walk.go new file mode 100644 index 000000000..8c7fdc5c9 --- /dev/null +++ b/pkg/file/walk.go @@ -0,0 +1,153 @@ +package file + +import ( + "errors" + "io/fs" + "os" + "path/filepath" + "sort" +) + +// Modified from github.com/facebookgo/symwalk + +// BSD License + +// For symwalk software + +// Copyright (c) 2015, Facebook, Inc. All rights reserved. + +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: + +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. + +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. + +// * Neither the name Facebook nor the names of its contributors may be used to +// endorse or promote products derived from this software without specific +// prior written permission. + +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// walkSym calls the provided WalkFn for regular files.
+// However, when it encounters a symbolic link, it resolves the link fully using the
+// filepath.EvalSymlinks function and recursively calls walkSym on the resolved path.
+// This ensures that, unlike filepath.Walk, traversal does not stop at symbolic links.
+//
+// Note that walkSym does not terminate if there are any non-terminating loops in
+// the file structure.
+func walkSym(f FS, filename string, linkDirname string, walkFn fs.WalkDirFunc) error {
+	symWalkFunc := func(path string, info fs.DirEntry, err error) error {
+
+		if fname, err := filepath.Rel(filename, path); err == nil {
+			path = filepath.Join(linkDirname, fname)
+		} else {
+			return err
+		}
+
+		if err == nil && info.Type()&os.ModeSymlink == os.ModeSymlink {
+			finalPath, err := filepath.EvalSymlinks(path)
+			if err != nil {
+				// don't bail out if symlink is invalid
+				return walkFn(path, info, err)
+			}
+			info, err := f.Lstat(finalPath)
+			if err != nil {
+				return walkFn(path, &statDirEntry{
+					info: info,
+				}, err)
+			}
+			if info.IsDir() {
+				return walkSym(f, finalPath, path, walkFn)
+			}
+		}
+
+		return walkFn(path, info, err)
+	}
+	return fsWalk(f, filename, symWalkFunc)
+}
+
+// symWalk extends filepath.Walk to also follow symlinks
+func symWalk(fs FS, path string, walkFn fs.WalkDirFunc) error {
+	return walkSym(fs, path, path, walkFn)
+}
+
+type statDirEntry struct {
+	info fs.FileInfo
+}
+
+func (d *statDirEntry) Name() string               { return d.info.Name() }
+func (d *statDirEntry) IsDir() bool                { return d.info.IsDir() }
+func (d *statDirEntry) Type() fs.FileMode          { return d.info.Mode().Type() }
+func (d *statDirEntry) Info() (fs.FileInfo, error) { return d.info, nil }
+
+func fsWalk(f FS, root string, fn fs.WalkDirFunc) error {
+	info, err := f.Lstat(root)
+	if err != nil {
+		err = fn(root, nil, err)
+	} else {
+		err = walkDir(f, root, &statDirEntry{info}, fn)
+	}
+	if errors.Is(err, fs.SkipDir) {
+		return nil
+	}
+	return err
+}
+
+func walkDir(f FS, path string, d fs.DirEntry, walkDirFn fs.WalkDirFunc) error {
+	if err := walkDirFn(path, d, nil); err != nil || !d.IsDir() {
+		if errors.Is(err, fs.SkipDir) && d.IsDir() {
+			// Successfully skipped directory.
+			err = nil
+		}
+		return err
+	}
+
+	dirs, err := readDir(f, path)
+	if err != nil {
+		// Second call, to report ReadDir error.
+		err = walkDirFn(path, d, err)
+		if err != nil {
+			return err
+		}
+	}
+
+	for _, d1 := range dirs {
+		path1 := filepath.Join(path, d1.Name())
+		if err := walkDir(f, path1, d1, walkDirFn); err != nil {
+			if errors.Is(err, fs.SkipDir) {
+				break
+			}
+			return err
+		}
+	}
+	return nil
+}
+
+// readDir reads the directory named by dirname and returns
+// a sorted list of directory entries.
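+// It mirrors the standard library's fs.ReadDir helper, but operates on this
+// package's FS abstraction so that zip-backed file systems can be walked too.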
+func readDir(fs FS, dirname string) ([]fs.DirEntry, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + dirs, err := f.ReadDir(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Slice(dirs, func(i, j int) bool { return dirs[i].Name() < dirs[j].Name() }) + return dirs, nil +} diff --git a/pkg/file/zip.go b/pkg/file/zip.go index 4028beea5..f610b8b1c 100644 --- a/pkg/file/zip.go +++ b/pkg/file/zip.go @@ -2,63 +2,135 @@ package file import ( "archive/zip" + "errors" + "fmt" "io" "io/fs" - "strings" + "path/filepath" ) -const zipSeparator = "\x00" +var ( + errNotReaderAt = errors.New("not a ReaderAt") + errZipFSOpenZip = errors.New("cannot open zip file inside zip file") +) -type zipFile struct { - zipPath string - zf *zip.File +// ZipFS is a file system backed by a zip file. +type ZipFS struct { + *zip.Reader + zipFileCloser io.Closer + zipInfo fs.FileInfo + zipPath string } -func (f *zipFile) Open() (io.ReadCloser, error) { - return f.zf.Open() -} - -func (f *zipFile) Path() string { - // TODO - fix this - return ZipFilename(f.zipPath, f.zf.Name) -} - -func (f *zipFile) FileInfo() fs.FileInfo { - return f.zf.FileInfo() -} - -func ZipFile(zipPath string, zf *zip.File) SourceFile { - return &zipFile{ - zipPath: zipPath, - zf: zf, +func newZipFS(fs FS, path string, info fs.FileInfo) (*ZipFS, error) { + reader, err := fs.Open(path) + if err != nil { + return nil, err } -} -func ZipFilename(zipFilename, filenameInZip string) string { - return zipFilename + zipSeparator + filenameInZip -} - -// IsZipPath returns true if the path includes the zip separator byte, -// indicating it is within a zip file. -func IsZipPath(p string) bool { - return strings.Contains(p, zipSeparator) -} - -// ZipPathDisplayName converts an zip path for display. It translates the zip -// file separator character into '/', since this character is also used for -// path separators within zip files. It returns the original provided path -// if it does not contain the zip file separator character. 
-func ZipPathDisplayName(path string) string {
-	return strings.ReplaceAll(path, zipSeparator, "/")
-}
-
-func ZipFilePath(path string) (zipFilename, filename string) {
-	nullIndex := strings.Index(path, zipSeparator)
-	if nullIndex != -1 {
-		zipFilename = path[0:nullIndex]
-		filename = path[nullIndex+1:]
-	} else {
-		filename = path
+	asReaderAt, _ := reader.(io.ReaderAt)
+	if asReaderAt == nil {
+		reader.Close()
+		return nil, errNotReaderAt
 	}
-	return
+
+	zipReader, err := zip.NewReader(asReaderAt, info.Size())
+	if err != nil {
+		reader.Close()
+		return nil, err
+	}
+
+	return &ZipFS{
+		Reader:        zipReader,
+		zipFileCloser: reader,
+		zipInfo:       info,
+		zipPath:       path,
+	}, nil
+}
+
+func (f *ZipFS) rel(name string) (string, error) {
+	if f.zipPath == name {
+		return ".", nil
+	}
+
+	relName, err := filepath.Rel(f.zipPath, name)
+	if err != nil {
+		return "", fmt.Errorf("internal error getting relative path: %w", err)
+	}
+
+	// convert relName to use slash, since zip files do so regardless
+	// of os
+	relName = filepath.ToSlash(relName)
+
+	return relName, nil
+}
+
+func (f *ZipFS) Lstat(name string) (fs.FileInfo, error) {
+	reader, err := f.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer reader.Close()
+
+	return reader.Stat()
+}
+
+func (f *ZipFS) OpenZip(name string) (*ZipFS, error) {
+	return nil, errZipFSOpenZip
+}
+
+type zipReadDirFile struct {
+	fs.File
+}
+
+func (f *zipReadDirFile) ReadDir(n int) ([]fs.DirEntry, error) {
+	asReadDirFile, _ := f.File.(fs.ReadDirFile)
+	if asReadDirFile == nil {
+		return nil, fmt.Errorf("internal error: not a ReadDirFile")
+	}
+
+	return asReadDirFile.ReadDir(n)
+}
+
+func (f *ZipFS) Open(name string) (fs.ReadDirFile, error) {
+	relName, err := f.rel(name)
+	if err != nil {
+		return nil, err
+	}
+
+	r, err := f.Reader.Open(relName)
+	if err != nil {
+		return nil, err
+	}
+
+	return &zipReadDirFile{
+		File: r,
+	}, nil
+}
+
+func (f *ZipFS) Close() error {
+	return f.zipFileCloser.Close()
+}
+
+// OpenOnly returns a ReadCloser where calling Close will close the zip fs as well.
+func (f *ZipFS) OpenOnly(name string) (io.ReadCloser, error) {
+	r, err := f.Open(name)
+	if err != nil {
+		return nil, err
+	}
+
+	return &wrappedReadCloser{
+		ReadCloser: r,
+		outer:      f,
+	}, nil
+}
+
+type wrappedReadCloser struct {
+	io.ReadCloser
+	outer io.Closer
+}
+
+func (f *wrappedReadCloser) Close() error {
+	_ = f.ReadCloser.Close()
+	return f.outer.Close()
+}
diff --git a/pkg/fsutil/file.go b/pkg/fsutil/file.go
index e958397f6..a3ccfc2e6 100644
--- a/pkg/fsutil/file.go
+++ b/pkg/fsutil/file.go
@@ -5,6 +5,7 @@ import (
 	"io"
 	"os"
 	"path/filepath"
+	"regexp"
 	"strings"
 )
 
@@ -116,3 +117,25 @@ func Touch(path string) error {
 	}
 	return nil
 }
+
+var (
+	replaceCharsRE = regexp.MustCompile(`[&=\\/:*"?_ ]`)
+	removeCharsRE  = regexp.MustCompile(`[^[:alnum:]-.]`)
+	multiHyphenRE  = regexp.MustCompile(`\-+`)
+)
+
+// SanitiseBasename returns a file basename removing any characters that are illegal or problematic to use in the filesystem.
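+//
+// For example (taken from the tests below):
+//
+//	SanitiseBasename(`a&b=c\d/:e*"f?_ g`)   // "a-b-c-d-e-f-g"
+//	SanitiseBasename("foo!!bar@@and, more") // "foobarand-more"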
+func SanitiseBasename(v string) string { + v = strings.TrimSpace(v) + + // replace illegal filename characters with - + v = replaceCharsRE.ReplaceAllString(v, "-") + + // remove other characters + v = removeCharsRE.ReplaceAllString(v, "") + + // remove multiple hyphens + v = multiHyphenRE.ReplaceAllString(v, "-") + + return strings.TrimSpace(v) +} diff --git a/pkg/fsutil/file_test.go b/pkg/fsutil/file_test.go new file mode 100644 index 000000000..393d3b420 --- /dev/null +++ b/pkg/fsutil/file_test.go @@ -0,0 +1,26 @@ +package fsutil + +import "testing" + +func TestSanitiseBasename(t *testing.T) { + tests := []struct { + name string + v string + want string + }{ + {"basic", "basic", "basic"}, + {"spaces", `spaced name`, "spaced-name"}, + {"leading/trailing spaces", ` spaced name `, "spaced-name"}, + {"hyphen name", `hyphened-name`, "hyphened-name"}, + {"multi-hyphen", `hyphened--name`, "hyphened-name"}, + {"replaced characters", `a&b=c\d/:e*"f?_ g`, "a-b-c-d-e-f-g"}, + {"removed characters", `foo!!bar@@and, more`, "foobarand-more"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := SanitiseBasename(tt.v); got != tt.want { + t.Errorf("SanitiseBasename() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/gallery/delete.go b/pkg/gallery/delete.go new file mode 100644 index 000000000..b6c1333ba --- /dev/null +++ b/pkg/gallery/delete.go @@ -0,0 +1,115 @@ +package gallery + +import ( + "context" + + "github.com/stashapp/stash/pkg/file" + "github.com/stashapp/stash/pkg/image" + "github.com/stashapp/stash/pkg/models" +) + +func (s *Service) Destroy(ctx context.Context, i *models.Gallery, fileDeleter *image.FileDeleter, deleteGenerated, deleteFile bool) ([]*models.Image, error) { + var imgsDestroyed []*models.Image + + // if this is a zip-based gallery, delete the images as well first + zipImgsDestroyed, err := s.destroyZipFileImages(ctx, i, fileDeleter, deleteGenerated, deleteFile) + if err != nil { + return nil, err + } + + imgsDestroyed = zipImgsDestroyed + + // only delete folder based gallery images if we're deleting the folder + if deleteFile { + folderImgsDestroyed, err := s.destroyFolderImages(ctx, i, fileDeleter, deleteGenerated, deleteFile) + if err != nil { + return nil, err + } + + imgsDestroyed = append(imgsDestroyed, folderImgsDestroyed...) + } + + // we only want to delete a folder-based gallery if it is empty. + // this has to be done post-transaction + + if err := s.Repository.Destroy(ctx, i.ID); err != nil { + return nil, err + } + + return imgsDestroyed, nil +} + +func (s *Service) destroyZipFileImages(ctx context.Context, i *models.Gallery, fileDeleter *image.FileDeleter, deleteGenerated, deleteFile bool) ([]*models.Image, error) { + if err := i.LoadFiles(ctx, s.Repository); err != nil { + return nil, err + } + + var imgsDestroyed []*models.Image + + destroyer := &file.ZipDestroyer{ + FileDestroyer: s.File, + FolderDestroyer: s.Folder, + } + + // for zip-based galleries, delete the images as well first + for _, f := range i.Files.List() { + // only do this where there are no other galleries related to the file + otherGalleries, err := s.Repository.FindByFileID(ctx, f.Base().ID) + if err != nil { + return nil, err + } + + if len(otherGalleries) > 1 { + // other gallery associated, don't remove + continue + } + + thisDestroyed, err := s.ImageService.DestroyZipImages(ctx, f, fileDeleter, deleteGenerated) + if err != nil { + return nil, err + } + + imgsDestroyed = append(imgsDestroyed, thisDestroyed...) 
+ + if deleteFile { + if err := destroyer.DestroyZip(ctx, f, fileDeleter.Deleter, deleteFile); err != nil { + return nil, err + } + } + } + + return imgsDestroyed, nil +} + +func (s *Service) destroyFolderImages(ctx context.Context, i *models.Gallery, fileDeleter *image.FileDeleter, deleteGenerated, deleteFile bool) ([]*models.Image, error) { + if i.FolderID == nil { + return nil, nil + } + + var imgsDestroyed []*models.Image + + // find images in this folder + imgs, err := s.ImageFinder.FindByFolderID(ctx, *i.FolderID) + if err != nil { + return nil, err + } + + for _, img := range imgs { + if err := img.LoadGalleryIDs(ctx, s.ImageFinder); err != nil { + return nil, err + } + + // only destroy images that are not attached to other galleries + if len(img.GalleryIDs.List()) > 1 { + continue + } + + if err := s.ImageService.Destroy(ctx, img, fileDeleter, deleteGenerated, deleteFile); err != nil { + return nil, err + } + + imgsDestroyed = append(imgsDestroyed, img) + } + + return imgsDestroyed, nil +} diff --git a/pkg/gallery/export.go b/pkg/gallery/export.go index f24660e60..ebd8a8604 100644 --- a/pkg/gallery/export.go +++ b/pkg/gallery/export.go @@ -1,60 +1,51 @@ package gallery import ( + "context" + "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models/json" "github.com/stashapp/stash/pkg/models/jsonschema" - "github.com/stashapp/stash/pkg/utils" + "github.com/stashapp/stash/pkg/studio" ) // ToBasicJSON converts a gallery object into its JSON object equivalent. It // does not convert the relationships to other objects. func ToBasicJSON(gallery *models.Gallery) (*jsonschema.Gallery, error) { newGalleryJSON := jsonschema.Gallery{ - Checksum: gallery.Checksum, - Zip: gallery.Zip, - CreatedAt: json.JSONTime{Time: gallery.CreatedAt.Timestamp}, - UpdatedAt: json.JSONTime{Time: gallery.UpdatedAt.Timestamp}, + Title: gallery.Title, + URL: gallery.URL, + Details: gallery.Details, + CreatedAt: json.JSONTime{Time: gallery.CreatedAt}, + UpdatedAt: json.JSONTime{Time: gallery.UpdatedAt}, } - if gallery.Path.Valid { - newGalleryJSON.Path = gallery.Path.String + if gallery.FolderID != nil { + newGalleryJSON.FolderPath = gallery.Path } - if gallery.FileModTime.Valid { - newGalleryJSON.FileModTime = json.JSONTime{Time: gallery.FileModTime.Timestamp} + for _, f := range gallery.Files.List() { + newGalleryJSON.ZipFiles = append(newGalleryJSON.ZipFiles, f.Base().Path) } - if gallery.Title.Valid { - newGalleryJSON.Title = gallery.Title.String + if gallery.Date != nil { + newGalleryJSON.Date = gallery.Date.String() } - if gallery.URL.Valid { - newGalleryJSON.URL = gallery.URL.String - } - - if gallery.Date.Valid { - newGalleryJSON.Date = utils.GetYMDFromDatabaseDate(gallery.Date.String) - } - - if gallery.Rating.Valid { - newGalleryJSON.Rating = int(gallery.Rating.Int64) + if gallery.Rating != nil { + newGalleryJSON.Rating = *gallery.Rating } newGalleryJSON.Organized = gallery.Organized - if gallery.Details.Valid { - newGalleryJSON.Details = gallery.Details.String - } - return &newGalleryJSON, nil } // GetStudioName returns the name of the provided gallery's studio. It returns an // empty string if there is no studio assigned to the gallery. 
-func GetStudioName(reader models.StudioReader, gallery *models.Gallery) (string, error) { - if gallery.StudioID.Valid { - studio, err := reader.Find(int(gallery.StudioID.Int64)) +func GetStudioName(ctx context.Context, reader studio.Finder, gallery *models.Gallery) (string, error) { + if gallery.StudioID != nil { + studio, err := reader.Find(ctx, *gallery.StudioID) if err != nil { return "", err } @@ -76,12 +67,22 @@ func GetIDs(galleries []*models.Gallery) []int { return results } -func GetChecksums(galleries []*models.Gallery) []string { - var results []string +func GetRefs(galleries []*models.Gallery) []jsonschema.GalleryRef { + var results []jsonschema.GalleryRef for _, gallery := range galleries { - if gallery.Checksum != "" { - results = append(results, gallery.Checksum) + toAdd := jsonschema.GalleryRef{} + switch { + case gallery.FolderID != nil: + toAdd.FolderPath = gallery.Path + case len(gallery.Files.List()) > 0: + for _, f := range gallery.Files.List() { + toAdd.ZipFiles = append(toAdd.ZipFiles, f.Base().Path) + } + default: + toAdd.Title = gallery.Title } + + results = append(results, toAdd) } return results diff --git a/pkg/gallery/export_test.go b/pkg/gallery/export_test.go index 80418d7e0..13f8227f4 100644 --- a/pkg/gallery/export_test.go +++ b/pkg/gallery/export_test.go @@ -3,6 +3,7 @@ package gallery import ( "errors" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models/json" "github.com/stashapp/stash/pkg/models/jsonschema" @@ -21,16 +22,13 @@ const ( errStudioID = 6 // noTagsID = 11 - errTagsID = 12 ) -const ( - path = "path" - isZip = true +var ( url = "url" - checksum = "checksum" title = "title" date = "2001-01-01" + dateObj = models.NewDate(date) rating = 5 organized = true details = "details" @@ -38,6 +36,7 @@ const ( const ( studioName = "studioName" + path = "path" ) var ( @@ -47,39 +46,32 @@ var ( func createFullGallery(id int) models.Gallery { return models.Gallery{ - ID: id, - Path: models.NullString(path), - Zip: isZip, - Title: models.NullString(title), - Checksum: checksum, - Date: models.SQLiteDate{ - String: date, - Valid: true, - }, - Details: models.NullString(details), - Rating: models.NullInt64(rating), + ID: id, + Files: models.NewRelatedFiles([]file.File{ + &file.BaseFile{ + Path: path, + }, + }), + Title: title, + Date: &dateObj, + Details: details, + Rating: &rating, Organized: organized, - URL: models.NullString(url), - CreatedAt: models.SQLiteTimestamp{ - Timestamp: createTime, - }, - UpdatedAt: models.SQLiteTimestamp{ - Timestamp: updateTime, - }, + URL: url, + CreatedAt: createTime, + UpdatedAt: updateTime, } } func createFullJSONGallery() *jsonschema.Gallery { return &jsonschema.Gallery{ Title: title, - Path: path, - Zip: isZip, - Checksum: checksum, Date: date, Details: details, Rating: rating, Organized: organized, URL: url, + ZipFiles: []string{path}, CreatedAt: json.JSONTime{ Time: createTime, }, @@ -121,7 +113,7 @@ func TestToJSON(t *testing.T) { func createStudioGallery(studioID int) models.Gallery { return models.Gallery{ - StudioID: models.NullInt64(int64(studioID)), + StudioID: &studioID, } } @@ -154,15 +146,15 @@ func TestGetStudioName(t *testing.T) { studioErr := errors.New("error getting image") - mockStudioReader.On("Find", studioID).Return(&models.Studio{ + mockStudioReader.On("Find", testCtx, studioID).Return(&models.Studio{ Name: models.NullString(studioName), }, nil).Once() - mockStudioReader.On("Find", missingStudioID).Return(nil, nil).Once() - 
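GetRefs chooses one identifier per gallery with a fixed precedence: folder path first, then zip file paths, then title. A self-contained sketch (inputs hypothetical; note the empty NewRelatedFiles on the last entry, since the switch evaluates Files.List() and the relationship apparently must be loaded first):

package main

import (
    "fmt"

    "github.com/stashapp/stash/pkg/file"
    "github.com/stashapp/stash/pkg/gallery"
    "github.com/stashapp/stash/pkg/models"
)

func main() {
    fid := file.FolderID(3) // hypothetical folder ID

    refs := gallery.GetRefs([]*models.Gallery{
        // folder-based: identified by its folder path
        {FolderID: &fid, Path: "/galleries/summer"},
        // zip-based: identified by its archive path(s)
        {Files: models.NewRelatedFiles([]file.File{
            &file.BaseFile{Path: "/archives/summer.zip"},
        })},
        // neither: falls back to the user-assigned title
        {Title: "manual gallery", Files: models.NewRelatedFiles([]file.File{})},
    })

    fmt.Println(refs[0].FolderPath, refs[1].ZipFiles, refs[2].Title)
}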
mockStudioReader.On("Find", errStudioID).Return(nil, studioErr).Once() + mockStudioReader.On("Find", testCtx, missingStudioID).Return(nil, nil).Once() + mockStudioReader.On("Find", testCtx, errStudioID).Return(nil, studioErr).Once() for i, s := range getStudioScenarios { gallery := s.input - json, err := GetStudioName(mockStudioReader, &gallery) + json, err := GetStudioName(testCtx, mockStudioReader, &gallery) switch { case !s.err && err != nil: diff --git a/pkg/gallery/import.go b/pkg/gallery/import.go index f82cff13b..c324d8d72 100644 --- a/pkg/gallery/import.go +++ b/pkg/gallery/import.go @@ -1,40 +1,53 @@ package gallery import ( - "database/sql" + "context" "fmt" "strings" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models/jsonschema" + "github.com/stashapp/stash/pkg/performer" "github.com/stashapp/stash/pkg/sliceutil/stringslice" + "github.com/stashapp/stash/pkg/studio" + "github.com/stashapp/stash/pkg/tag" ) type Importer struct { - ReaderWriter models.GalleryReaderWriter - StudioWriter models.StudioReaderWriter - PerformerWriter models.PerformerReaderWriter - TagWriter models.TagReaderWriter + ReaderWriter FullCreatorUpdater + StudioWriter studio.NameFinderCreator + PerformerWriter performer.NameFinderCreator + TagWriter tag.NameFinderCreator + FileFinder file.Getter + FolderFinder file.FolderGetter Input jsonschema.Gallery MissingRefBehaviour models.ImportMissingRefEnum - gallery models.Gallery - performers []*models.Performer - tags []*models.Tag + gallery models.Gallery } -func (i *Importer) PreImport() error { +type FullCreatorUpdater interface { + FinderCreatorUpdater + Update(ctx context.Context, updatedGallery *models.Gallery) error +} + +func (i *Importer) PreImport(ctx context.Context) error { i.gallery = i.galleryJSONToGallery(i.Input) - if err := i.populateStudio(); err != nil { + if err := i.populateFilesFolder(ctx); err != nil { return err } - if err := i.populatePerformers(); err != nil { + if err := i.populateStudio(ctx); err != nil { return err } - if err := i.populateTags(); err != nil { + if err := i.populatePerformers(ctx); err != nil { + return err + } + + if err := i.populateTags(ctx); err != nil { return err } @@ -43,40 +56,37 @@ func (i *Importer) PreImport() error { func (i *Importer) galleryJSONToGallery(galleryJSON jsonschema.Gallery) models.Gallery { newGallery := models.Gallery{ - Checksum: galleryJSON.Checksum, - Zip: galleryJSON.Zip, - } - - if galleryJSON.Path != "" { - newGallery.Path = sql.NullString{String: galleryJSON.Path, Valid: true} + PerformerIDs: models.NewRelatedIDs([]int{}), + TagIDs: models.NewRelatedIDs([]int{}), } if galleryJSON.Title != "" { - newGallery.Title = sql.NullString{String: galleryJSON.Title, Valid: true} + newGallery.Title = galleryJSON.Title } if galleryJSON.Details != "" { - newGallery.Details = sql.NullString{String: galleryJSON.Details, Valid: true} + newGallery.Details = galleryJSON.Details } if galleryJSON.URL != "" { - newGallery.URL = sql.NullString{String: galleryJSON.URL, Valid: true} + newGallery.URL = galleryJSON.URL } if galleryJSON.Date != "" { - newGallery.Date = models.SQLiteDate{String: galleryJSON.Date, Valid: true} + d := models.NewDate(galleryJSON.Date) + newGallery.Date = &d } if galleryJSON.Rating != 0 { - newGallery.Rating = sql.NullInt64{Int64: int64(galleryJSON.Rating), Valid: true} + newGallery.Rating = &galleryJSON.Rating } newGallery.Organized = galleryJSON.Organized - newGallery.CreatedAt = models.SQLiteTimestamp{Timestamp: 
galleryJSON.CreatedAt.GetTime()} - newGallery.UpdatedAt = models.SQLiteTimestamp{Timestamp: galleryJSON.UpdatedAt.GetTime()} + newGallery.CreatedAt = galleryJSON.CreatedAt.GetTime() + newGallery.UpdatedAt = galleryJSON.UpdatedAt.GetTime() return newGallery } -func (i *Importer) populateStudio() error { +func (i *Importer) populateStudio(ctx context.Context) error { if i.Input.Studio != "" { - studio, err := i.StudioWriter.FindByName(i.Input.Studio, false) + studio, err := i.StudioWriter.FindByName(ctx, i.Input.Studio, false) if err != nil { return fmt.Errorf("error finding studio by name: %v", err) } @@ -91,27 +101,24 @@ func (i *Importer) populateStudio() error { } if i.MissingRefBehaviour == models.ImportMissingRefEnumCreate { - studioID, err := i.createStudio(i.Input.Studio) + studioID, err := i.createStudio(ctx, i.Input.Studio) if err != nil { return err } - i.gallery.StudioID = sql.NullInt64{ - Int64: int64(studioID), - Valid: true, - } + i.gallery.StudioID = &studioID } } else { - i.gallery.StudioID = sql.NullInt64{Int64: int64(studio.ID), Valid: true} + i.gallery.StudioID = &studio.ID } } return nil } -func (i *Importer) createStudio(name string) (int, error) { +func (i *Importer) createStudio(ctx context.Context, name string) (int, error) { newStudio := *models.NewStudio(name) - created, err := i.StudioWriter.Create(newStudio) + created, err := i.StudioWriter.Create(ctx, newStudio) if err != nil { return 0, err } @@ -119,10 +126,10 @@ func (i *Importer) createStudio(name string) (int, error) { return created.ID, nil } -func (i *Importer) populatePerformers() error { +func (i *Importer) populatePerformers(ctx context.Context) error { if len(i.Input.Performers) > 0 { names := i.Input.Performers - performers, err := i.PerformerWriter.FindByNames(names, false) + performers, err := i.PerformerWriter.FindByNames(ctx, names, false) if err != nil { return err } @@ -145,7 +152,7 @@ func (i *Importer) populatePerformers() error { } if i.MissingRefBehaviour == models.ImportMissingRefEnumCreate { - createdPerformers, err := i.createPerformers(missingPerformers) + createdPerformers, err := i.createPerformers(ctx, missingPerformers) if err != nil { return fmt.Errorf("error creating gallery performers: %v", err) } @@ -156,18 +163,20 @@ func (i *Importer) populatePerformers() error { // ignore if MissingRefBehaviour set to Ignore } - i.performers = performers + for _, p := range performers { + i.gallery.PerformerIDs.Add(p.ID) + } } return nil } -func (i *Importer) createPerformers(names []string) ([]*models.Performer, error) { +func (i *Importer) createPerformers(ctx context.Context, names []string) ([]*models.Performer, error) { var ret []*models.Performer for _, name := range names { newPerformer := *models.NewPerformer(name) - created, err := i.PerformerWriter.Create(newPerformer) + created, err := i.PerformerWriter.Create(ctx, newPerformer) if err != nil { return nil, err } @@ -178,10 +187,10 @@ func (i *Importer) createPerformers(names []string) ([]*models.Performer, error) return ret, nil } -func (i *Importer) populateTags() error { +func (i *Importer) populateTags(ctx context.Context) error { if len(i.Input.Tags) > 0 { names := i.Input.Tags - tags, err := i.TagWriter.FindByNames(names, false) + tags, err := i.TagWriter.FindByNames(ctx, names, false) if err != nil { return err } @@ -201,7 +210,7 @@ func (i *Importer) populateTags() error { } if i.MissingRefBehaviour == models.ImportMissingRefEnumCreate { - createdTags, err := i.createTags(missingTags) + createdTags, err := i.createTags(ctx, 
missingTags) if err != nil { return fmt.Errorf("error creating gallery tags: %v", err) } @@ -212,18 +221,20 @@ func (i *Importer) populateTags() error { // ignore if MissingRefBehaviour set to Ignore } - i.tags = tags + for _, t := range tags { + i.gallery.TagIDs.Add(t.ID) + } } return nil } -func (i *Importer) createTags(names []string) ([]*models.Tag, error) { +func (i *Importer) createTags(ctx context.Context, names []string) ([]*models.Tag, error) { var ret []*models.Tag for _, name := range names { newTag := *models.NewTag(name) - created, err := i.TagWriter.Create(newTag) + created, err := i.TagWriter.Create(ctx, newTag) if err != nil { return nil, err } @@ -234,63 +245,113 @@ func (i *Importer) createTags(names []string) ([]*models.Tag, error) { return ret, nil } -func (i *Importer) PostImport(id int) error { - if len(i.performers) > 0 { - var performerIDs []int - for _, performer := range i.performers { - performerIDs = append(performerIDs, performer.ID) +func (i *Importer) populateFilesFolder(ctx context.Context) error { + files := make([]file.File, 0) + + for _, ref := range i.Input.ZipFiles { + path := ref + f, err := i.FileFinder.FindByPath(ctx, path) + if err != nil { + return fmt.Errorf("error finding file: %w", err) } - if err := i.ReaderWriter.UpdatePerformers(id, performerIDs); err != nil { - return fmt.Errorf("failed to associate performers: %v", err) + if f == nil { + return fmt.Errorf("gallery zip file '%s' not found", path) + } else { + files = append(files, f) } } - if len(i.tags) > 0 { - var tagIDs []int - for _, t := range i.tags { - tagIDs = append(tagIDs, t.ID) + i.gallery.Files = models.NewRelatedFiles(files) + + if i.Input.FolderPath != "" { + path := i.Input.FolderPath + f, err := i.FolderFinder.FindByPath(ctx, path) + if err != nil { + return fmt.Errorf("error finding folder: %w", err) } - if err := i.ReaderWriter.UpdateTags(id, tagIDs); err != nil { - return fmt.Errorf("failed to associate tags: %v", err) + + if f == nil { + return fmt.Errorf("gallery folder '%s' not found", path) + } else { + i.gallery.FolderID = &f.ID } } return nil } -func (i *Importer) Name() string { - return i.Input.Path +func (i *Importer) PostImport(ctx context.Context, id int) error { + return nil } -func (i *Importer) FindExistingID() (*int, error) { - existing, err := i.ReaderWriter.FindByChecksum(i.Input.Checksum) +func (i *Importer) Name() string { + if i.Input.Title != "" { + return i.Input.Title + } + + if i.Input.FolderPath != "" { + return i.Input.FolderPath + } + + if len(i.Input.ZipFiles) > 0 { + return i.Input.ZipFiles[0] + } + + return "" +} + +func (i *Importer) FindExistingID(ctx context.Context) (*int, error) { + var existing []*models.Gallery + var err error + switch { + case len(i.gallery.Files.List()) > 0: + for _, f := range i.gallery.Files.List() { + existing, err = i.ReaderWriter.FindByFileID(ctx, f.Base().ID) + if err != nil { + return nil, err + } + + if len(existing) > 0 { + break + } + } + case i.gallery.FolderID != nil: + existing, err = i.ReaderWriter.FindByFolderID(ctx, *i.gallery.FolderID) + default: + existing, err = i.ReaderWriter.FindUserGalleryByTitle(ctx, i.gallery.Title) + } + if err != nil { return nil, err } - if existing != nil { - id := existing.ID + if len(existing) > 0 { + id := existing[0].ID return &id, nil } return nil, nil } -func (i *Importer) Create() (*int, error) { - created, err := i.ReaderWriter.Create(i.gallery) +func (i *Importer) Create(ctx context.Context) (*int, error) { + var fileIDs []file.ID + for _, f := range
i.gallery.Files.List() { + fileIDs = append(fileIDs, f.Base().ID) + } + err := i.ReaderWriter.Create(ctx, &i.gallery, fileIDs) if err != nil { return nil, fmt.Errorf("error creating gallery: %v", err) } - id := created.ID + id := i.gallery.ID return &id, nil } -func (i *Importer) Update(id int) error { +func (i *Importer) Update(ctx context.Context, id int) error { gallery := i.gallery gallery.ID = id - _, err := i.ReaderWriter.Update(gallery) + err := i.ReaderWriter.Update(ctx, &gallery) if err != nil { return fmt.Errorf("error updating existing gallery: %v", err) } diff --git a/pkg/gallery/import_test.go b/pkg/gallery/import_test.go index d50fd16d1..43634fd13 100644 --- a/pkg/gallery/import_test.go +++ b/pkg/gallery/import_test.go @@ -1,10 +1,12 @@ package gallery import ( + "context" "errors" "testing" "time" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models/json" "github.com/stashapp/stash/pkg/models/jsonschema" @@ -13,11 +15,7 @@ import ( "github.com/stretchr/testify/mock" ) -const ( - galleryNameErr = "galleryNameErr" - // existingGalleryName = "existingGalleryName" - - existingGalleryID = 100 +var ( existingStudioID = 101 existingPerformerID = 103 existingTagID = 105 @@ -33,33 +31,18 @@ const ( existingTagName = "existingTagName" existingTagErr = "existingTagErr" missingTagName = "missingTagName" - - errPerformersID = 200 - - missingChecksum = "missingChecksum" - errChecksum = "errChecksum" ) +var testCtx = context.Background() + var ( createdAt = time.Date(2001, time.January, 2, 1, 2, 3, 4, time.Local) updatedAt = time.Date(2002, time.January, 2, 1, 2, 3, 4, time.Local) ) -func TestImporterName(t *testing.T) { - i := Importer{ - Input: jsonschema.Gallery{ - Path: path, - }, - } - - assert.Equal(t, path, i.Name()) -} - func TestImporterPreImport(t *testing.T) { i := Importer{ Input: jsonschema.Gallery{ - Path: path, - Checksum: checksum, Title: title, Date: date, Details: details, @@ -75,27 +58,21 @@ func TestImporterPreImport(t *testing.T) { }, } - err := i.PreImport() + err := i.PreImport(testCtx) assert.Nil(t, err) expectedGallery := models.Gallery{ - Path: models.NullString(path), - Checksum: checksum, - Title: models.NullString(title), - Date: models.SQLiteDate{ - String: date, - Valid: true, - }, - Details: models.NullString(details), - Rating: models.NullInt64(rating), - Organized: organized, - URL: models.NullString(url), - CreatedAt: models.SQLiteTimestamp{ - Timestamp: createdAt, - }, - UpdatedAt: models.SQLiteTimestamp{ - Timestamp: updatedAt, - }, + Title: title, + Date: &dateObj, + Details: details, + Rating: &rating, + Organized: organized, + URL: url, + Files: models.NewRelatedFiles([]file.File{}), + TagIDs: models.NewRelatedIDs([]int{}), + PerformerIDs: models.NewRelatedIDs([]int{}), + CreatedAt: createdAt, + UpdatedAt: updatedAt, } assert.Equal(t, expectedGallery, i.gallery) @@ -108,21 +85,20 @@ func TestImporterPreImportWithStudio(t *testing.T) { StudioWriter: studioReaderWriter, Input: jsonschema.Gallery{ Studio: existingStudioName, - Path: path, }, } - studioReaderWriter.On("FindByName", existingStudioName, false).Return(&models.Studio{ + studioReaderWriter.On("FindByName", testCtx, existingStudioName, false).Return(&models.Studio{ ID: existingStudioID, }, nil).Once() - studioReaderWriter.On("FindByName", existingStudioErr, false).Return(nil, errors.New("FindByName error")).Once() + studioReaderWriter.On("FindByName", testCtx, existingStudioErr, false).Return(nil, errors.New("FindByName 
error")).Once() - err := i.PreImport() + err := i.PreImport(testCtx) assert.Nil(t, err) - assert.Equal(t, int64(existingStudioID), i.gallery.StudioID.Int64) + assert.Equal(t, existingStudioID, *i.gallery.StudioID) i.Input.Studio = existingStudioErr - err = i.PreImport() + err = i.PreImport(testCtx) assert.NotNil(t, err) studioReaderWriter.AssertExpectations(t) @@ -134,28 +110,27 @@ func TestImporterPreImportWithMissingStudio(t *testing.T) { i := Importer{ StudioWriter: studioReaderWriter, Input: jsonschema.Gallery{ - Path: path, Studio: missingStudioName, }, MissingRefBehaviour: models.ImportMissingRefEnumFail, } - studioReaderWriter.On("FindByName", missingStudioName, false).Return(nil, nil).Times(3) - studioReaderWriter.On("Create", mock.AnythingOfType("models.Studio")).Return(&models.Studio{ + studioReaderWriter.On("FindByName", testCtx, missingStudioName, false).Return(nil, nil).Times(3) + studioReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Studio")).Return(&models.Studio{ ID: existingStudioID, }, nil) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumIgnore - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumCreate - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) - assert.Equal(t, int64(existingStudioID), i.gallery.StudioID.Int64) + assert.Equal(t, existingStudioID, *i.gallery.StudioID) studioReaderWriter.AssertExpectations(t) } @@ -166,16 +141,15 @@ func TestImporterPreImportWithMissingStudioCreateErr(t *testing.T) { i := Importer{ StudioWriter: studioReaderWriter, Input: jsonschema.Gallery{ - Path: path, Studio: missingStudioName, }, MissingRefBehaviour: models.ImportMissingRefEnumCreate, } - studioReaderWriter.On("FindByName", missingStudioName, false).Return(nil, nil).Once() - studioReaderWriter.On("Create", mock.AnythingOfType("models.Studio")).Return(nil, errors.New("Create error")) + studioReaderWriter.On("FindByName", testCtx, missingStudioName, false).Return(nil, nil).Once() + studioReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Studio")).Return(nil, errors.New("Create error")) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) } @@ -186,27 +160,26 @@ func TestImporterPreImportWithPerformer(t *testing.T) { PerformerWriter: performerReaderWriter, MissingRefBehaviour: models.ImportMissingRefEnumFail, Input: jsonschema.Gallery{ - Path: path, Performers: []string{ existingPerformerName, }, }, } - performerReaderWriter.On("FindByNames", []string{existingPerformerName}, false).Return([]*models.Performer{ + performerReaderWriter.On("FindByNames", testCtx, []string{existingPerformerName}, false).Return([]*models.Performer{ { ID: existingPerformerID, Name: models.NullString(existingPerformerName), }, }, nil).Once() - performerReaderWriter.On("FindByNames", []string{existingPerformerErr}, false).Return(nil, errors.New("FindByNames error")).Once() + performerReaderWriter.On("FindByNames", testCtx, []string{existingPerformerErr}, false).Return(nil, errors.New("FindByNames error")).Once() - err := i.PreImport() + err := i.PreImport(testCtx) assert.Nil(t, err) - assert.Equal(t, existingPerformerID, i.performers[0].ID) + assert.Equal(t, []int{existingPerformerID}, i.gallery.PerformerIDs.List()) i.Input.Performers = []string{existingPerformerErr} - err = i.PreImport() + err = i.PreImport(testCtx) assert.NotNil(t, err) performerReaderWriter.AssertExpectations(t) @@ -218,7 
+191,6 @@ func TestImporterPreImportWithMissingPerformer(t *testing.T) { i := Importer{ PerformerWriter: performerReaderWriter, Input: jsonschema.Gallery{ - Path: path, Performers: []string{ missingPerformerName, }, @@ -226,22 +198,22 @@ func TestImporterPreImportWithMissingPerformer(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumFail, } - performerReaderWriter.On("FindByNames", []string{missingPerformerName}, false).Return(nil, nil).Times(3) - performerReaderWriter.On("Create", mock.AnythingOfType("models.Performer")).Return(&models.Performer{ + performerReaderWriter.On("FindByNames", testCtx, []string{missingPerformerName}, false).Return(nil, nil).Times(3) + performerReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Performer")).Return(&models.Performer{ ID: existingPerformerID, }, nil) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumIgnore - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumCreate - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) - assert.Equal(t, existingPerformerID, i.performers[0].ID) + assert.Equal(t, []int{existingPerformerID}, i.gallery.PerformerIDs.List()) performerReaderWriter.AssertExpectations(t) } @@ -252,7 +224,6 @@ func TestImporterPreImportWithMissingPerformerCreateErr(t *testing.T) { i := Importer{ PerformerWriter: performerReaderWriter, Input: jsonschema.Gallery{ - Path: path, Performers: []string{ missingPerformerName, }, @@ -260,10 +231,10 @@ func TestImporterPreImportWithMissingPerformerCreateErr(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumCreate, } - performerReaderWriter.On("FindByNames", []string{missingPerformerName}, false).Return(nil, nil).Once() - performerReaderWriter.On("Create", mock.AnythingOfType("models.Performer")).Return(nil, errors.New("Create error")) + performerReaderWriter.On("FindByNames", testCtx, []string{missingPerformerName}, false).Return(nil, nil).Once() + performerReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Performer")).Return(nil, errors.New("Create error")) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) } @@ -274,27 +245,26 @@ func TestImporterPreImportWithTag(t *testing.T) { TagWriter: tagReaderWriter, MissingRefBehaviour: models.ImportMissingRefEnumFail, Input: jsonschema.Gallery{ - Path: path, Tags: []string{ existingTagName, }, }, } - tagReaderWriter.On("FindByNames", []string{existingTagName}, false).Return([]*models.Tag{ + tagReaderWriter.On("FindByNames", testCtx, []string{existingTagName}, false).Return([]*models.Tag{ { ID: existingTagID, Name: existingTagName, }, }, nil).Once() - tagReaderWriter.On("FindByNames", []string{existingTagErr}, false).Return(nil, errors.New("FindByNames error")).Once() + tagReaderWriter.On("FindByNames", testCtx, []string{existingTagErr}, false).Return(nil, errors.New("FindByNames error")).Once() - err := i.PreImport() + err := i.PreImport(testCtx) assert.Nil(t, err) - assert.Equal(t, existingTagID, i.tags[0].ID) + assert.Equal(t, []int{existingTagID}, i.gallery.TagIDs.List()) i.Input.Tags = []string{existingTagErr} - err = i.PreImport() + err = i.PreImport(testCtx) assert.NotNil(t, err) tagReaderWriter.AssertExpectations(t) @@ -306,7 +276,6 @@ func TestImporterPreImportWithMissingTag(t *testing.T) { i := Importer{ TagWriter: tagReaderWriter, Input: jsonschema.Gallery{ - Path: path, Tags: []string{ missingTagName, }, @@ -314,22 
+283,22 @@ func TestImporterPreImportWithMissingTag(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumFail, } - tagReaderWriter.On("FindByNames", []string{missingTagName}, false).Return(nil, nil).Times(3) - tagReaderWriter.On("Create", mock.AnythingOfType("models.Tag")).Return(&models.Tag{ + tagReaderWriter.On("FindByNames", testCtx, []string{missingTagName}, false).Return(nil, nil).Times(3) + tagReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Tag")).Return(&models.Tag{ ID: existingTagID, }, nil) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumIgnore - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumCreate - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) - assert.Equal(t, existingTagID, i.tags[0].ID) + assert.Equal(t, []int{existingTagID}, i.gallery.TagIDs.List()) tagReaderWriter.AssertExpectations(t) } @@ -340,7 +309,6 @@ func TestImporterPreImportWithMissingTagCreateErr(t *testing.T) { i := Importer{ TagWriter: tagReaderWriter, Input: jsonschema.Gallery{ - Path: path, Tags: []string{ missingTagName, }, @@ -348,152 +316,9 @@ func TestImporterPreImportWithMissingTagCreateErr(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumCreate, } - tagReaderWriter.On("FindByNames", []string{missingTagName}, false).Return(nil, nil).Once() - tagReaderWriter.On("Create", mock.AnythingOfType("models.Tag")).Return(nil, errors.New("Create error")) + tagReaderWriter.On("FindByNames", testCtx, []string{missingTagName}, false).Return(nil, nil).Once() + tagReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Tag")).Return(nil, errors.New("Create error")) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) } - -func TestImporterPostImportUpdatePerformers(t *testing.T) { - galleryReaderWriter := &mocks.GalleryReaderWriter{} - - i := Importer{ - ReaderWriter: galleryReaderWriter, - performers: []*models.Performer{ - { - ID: existingPerformerID, - }, - }, - } - - updateErr := errors.New("UpdatePerformers error") - - galleryReaderWriter.On("UpdatePerformers", galleryID, []int{existingPerformerID}).Return(nil).Once() - galleryReaderWriter.On("UpdatePerformers", errPerformersID, mock.AnythingOfType("[]int")).Return(updateErr).Once() - - err := i.PostImport(galleryID) - assert.Nil(t, err) - - err = i.PostImport(errPerformersID) - assert.NotNil(t, err) - - galleryReaderWriter.AssertExpectations(t) -} - -func TestImporterPostImportUpdateTags(t *testing.T) { - galleryReaderWriter := &mocks.GalleryReaderWriter{} - - i := Importer{ - ReaderWriter: galleryReaderWriter, - tags: []*models.Tag{ - { - ID: existingTagID, - }, - }, - } - - updateErr := errors.New("UpdateTags error") - - galleryReaderWriter.On("UpdateTags", galleryID, []int{existingTagID}).Return(nil).Once() - galleryReaderWriter.On("UpdateTags", errTagsID, mock.AnythingOfType("[]int")).Return(updateErr).Once() - - err := i.PostImport(galleryID) - assert.Nil(t, err) - - err = i.PostImport(errTagsID) - assert.NotNil(t, err) - - galleryReaderWriter.AssertExpectations(t) -} - -func TestImporterFindExistingID(t *testing.T) { - readerWriter := &mocks.GalleryReaderWriter{} - - i := Importer{ - ReaderWriter: readerWriter, - Input: jsonschema.Gallery{ - Path: path, - Checksum: missingChecksum, - }, - } - - expectedErr := errors.New("FindBy* error") - readerWriter.On("FindByChecksum", missingChecksum).Return(nil, nil).Once() - 
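The deleted PostImport/FindExistingID/Create/Update tests below targeted the old checksum-driven flow. Under the new context-aware signatures, a driver would presumably sequence an Importer roughly as follows; this is a sketch of the contract implied by the methods above, not the actual import runner:

package example

import (
    "context"

    "github.com/stashapp/stash/pkg/gallery"
)

func runImport(ctx context.Context, i *gallery.Importer) error {
    // resolve files/folder, studio, performers and tags up front
    if err := i.PreImport(ctx); err != nil {
        return err
    }

    // match against existing galleries by file ID, folder ID or title
    id, err := i.FindExistingID(ctx)
    if err != nil {
        return err
    }

    if id == nil {
        // nothing matched: create a new gallery linked to its file IDs
        id, err = i.Create(ctx)
        if err != nil {
            return err
        }
    } else if err := i.Update(ctx, *id); err != nil {
        return err
    }

    return i.PostImport(ctx, *id)
}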
readerWriter.On("FindByChecksum", checksum).Return(&models.Gallery{ - ID: existingGalleryID, - }, nil).Once() - readerWriter.On("FindByChecksum", errChecksum).Return(nil, expectedErr).Once() - - id, err := i.FindExistingID() - assert.Nil(t, id) - assert.Nil(t, err) - - i.Input.Checksum = checksum - id, err = i.FindExistingID() - assert.Equal(t, existingGalleryID, *id) - assert.Nil(t, err) - - i.Input.Checksum = errChecksum - id, err = i.FindExistingID() - assert.Nil(t, id) - assert.NotNil(t, err) - - readerWriter.AssertExpectations(t) -} - -func TestCreate(t *testing.T) { - readerWriter := &mocks.GalleryReaderWriter{} - - gallery := models.Gallery{ - Title: models.NullString(title), - } - - galleryErr := models.Gallery{ - Title: models.NullString(galleryNameErr), - } - - i := Importer{ - ReaderWriter: readerWriter, - gallery: gallery, - } - - errCreate := errors.New("Create error") - readerWriter.On("Create", gallery).Return(&models.Gallery{ - ID: galleryID, - }, nil).Once() - readerWriter.On("Create", galleryErr).Return(nil, errCreate).Once() - - id, err := i.Create() - assert.Equal(t, galleryID, *id) - assert.Nil(t, err) - - i.gallery = galleryErr - id, err = i.Create() - assert.Nil(t, id) - assert.NotNil(t, err) - - readerWriter.AssertExpectations(t) -} - -func TestUpdate(t *testing.T) { - readerWriter := &mocks.GalleryReaderWriter{} - - gallery := models.Gallery{ - Title: models.NullString(title), - } - - i := Importer{ - ReaderWriter: readerWriter, - gallery: gallery, - } - - // id needs to be set for the mock input - gallery.ID = galleryID - readerWriter.On("Update", gallery).Return(nil, nil).Once() - - err := i.Update(galleryID) - assert.Nil(t, err) - - readerWriter.AssertExpectations(t) -} diff --git a/pkg/gallery/query.go b/pkg/gallery/query.go index f15e480f2..dc97cec2b 100644 --- a/pkg/gallery/query.go +++ b/pkg/gallery/query.go @@ -1,12 +1,30 @@ package gallery import ( + "context" "strconv" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/models" ) -func CountByPerformerID(r models.GalleryReader, id int) (int, error) { +type Queryer interface { + Query(ctx context.Context, galleryFilter *models.GalleryFilterType, findFilter *models.FindFilterType) ([]*models.Gallery, int, error) +} + +type CountQueryer interface { + QueryCount(ctx context.Context, galleryFilter *models.GalleryFilterType, findFilter *models.FindFilterType) (int, error) +} + +type Finder interface { + FindByPath(ctx context.Context, p string) ([]*models.Gallery, error) + FindUserGalleryByTitle(ctx context.Context, title string) ([]*models.Gallery, error) + FindByFolderID(ctx context.Context, folderID file.FolderID) ([]*models.Gallery, error) + FindByFileID(ctx context.Context, fileID file.ID) ([]*models.Gallery, error) + FindByFingerprints(ctx context.Context, fp []file.Fingerprint) ([]*models.Gallery, error) +} + +func CountByPerformerID(ctx context.Context, r CountQueryer, id int) (int, error) { filter := &models.GalleryFilterType{ Performers: &models.MultiCriterionInput{ Value: []string{strconv.Itoa(id)}, @@ -14,10 +32,10 @@ func CountByPerformerID(r models.GalleryReader, id int) (int, error) { }, } - return r.QueryCount(filter, nil) + return r.QueryCount(ctx, filter, nil) } -func CountByStudioID(r models.GalleryReader, id int) (int, error) { +func CountByStudioID(ctx context.Context, r CountQueryer, id int) (int, error) { filter := &models.GalleryFilterType{ Studios: &models.HierarchicalMultiCriterionInput{ Value: []string{strconv.Itoa(id)}, @@ -25,10 +43,10 @@ func CountByStudioID(r 
models.GalleryReader, id int) (int, error) { }, } - return r.QueryCount(filter, nil) + return r.QueryCount(ctx, filter, nil) } -func CountByTagID(r models.GalleryReader, id int) (int, error) { +func CountByTagID(ctx context.Context, r CountQueryer, id int) (int, error) { filter := &models.GalleryFilterType{ Tags: &models.HierarchicalMultiCriterionInput{ Value: []string{strconv.Itoa(id)}, @@ -36,5 +54,5 @@ func CountByTagID(r models.GalleryReader, id int) (int, error) { }, } - return r.QueryCount(filter, nil) + return r.QueryCount(ctx, filter, nil) } diff --git a/pkg/gallery/scan.go b/pkg/gallery/scan.go index f45a26d77..3908f1cc2 100644 --- a/pkg/gallery/scan.go +++ b/pkg/gallery/scan.go @@ -1,244 +1,133 @@ package gallery import ( - "archive/zip" "context" - "database/sql" "fmt" + "path/filepath" "strings" "time" "github.com/stashapp/stash/pkg/file" - "github.com/stashapp/stash/pkg/fsutil" "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" - "github.com/stashapp/stash/pkg/models/paths" "github.com/stashapp/stash/pkg/plugin" - "github.com/stashapp/stash/pkg/utils" ) -const mutexType = "gallery" - -type Scanner struct { - file.Scanner - - ImageExtensions []string - StripFileExtension bool - CaseSensitiveFs bool - TxnManager models.TransactionManager - Paths *paths.Paths - PluginCache *plugin.Cache - MutexManager *utils.MutexManager +type FinderCreatorUpdater interface { + Finder + Create(ctx context.Context, newGallery *models.Gallery, fileIDs []file.ID) error + AddFileID(ctx context.Context, id int, fileID file.ID) error + models.FileLoader } -func FileScanner(hasher file.Hasher) file.Scanner { - return file.Scanner{ - Hasher: hasher, - CalculateMD5: true, - } +type SceneFinderUpdater interface { + FindByPath(ctx context.Context, p string) ([]*models.Scene, error) + Update(ctx context.Context, updatedScene *models.Scene) error + AddGalleryIDs(ctx context.Context, sceneID int, galleryIDs []int) error } -func (scanner *Scanner) ScanExisting(ctx context.Context, existing file.FileBased, file file.SourceFile) (retGallery *models.Gallery, scanImages bool, err error) { - scanned, err := scanner.Scanner.ScanExisting(existing, file) +type ScanHandler struct { + CreatorUpdater FullCreatorUpdater + SceneFinderUpdater SceneFinderUpdater + + PluginCache *plugin.Cache +} + +func (h *ScanHandler) Handle(ctx context.Context, f file.File) error { + baseFile := f.Base() + + // try to match the file to a gallery + existing, err := h.CreatorUpdater.FindByFileID(ctx, f.Base().ID) if err != nil { - return nil, false, err + return fmt.Errorf("finding existing gallery: %w", err) } - // we don't currently store sizes for gallery files - // clear the file size so that we don't incorrectly detect a - // change - scanned.New.Size = "" - - retGallery = existing.(*models.Gallery) - - path := scanned.New.Path - - changed := false - - if scanned.ContentsChanged() { - retGallery.SetFile(*scanned.New) - changed = true - } else if scanned.FileUpdated() { - logger.Infof("Updated gallery file %s", path) - - retGallery.SetFile(*scanned.New) - changed = true + if len(existing) == 0 { + // try also to match file by fingerprints + existing, err = h.CreatorUpdater.FindByFingerprints(ctx, baseFile.Fingerprints) + if err != nil { + return fmt.Errorf("finding existing gallery by fingerprints: %w", err) + } } - if changed { - scanImages = true - logger.Infof("%s has been updated: rescanning", path) - - retGallery.UpdatedAt = models.SQLiteTimestamp{Timestamp: time.Now()} - - // we are operating on a checksum 
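Since the count helpers above now depend only on the narrow CountQueryer interface plus a context, tests and callers can substitute a minimal stand-in for the full gallery store. A sketch with an illustrative fake:

package main

import (
    "context"
    "fmt"

    "github.com/stashapp/stash/pkg/gallery"
    "github.com/stashapp/stash/pkg/models"
)

// fakeCounter satisfies gallery.CountQueryer without touching a database.
type fakeCounter struct{ n int }

func (f fakeCounter) QueryCount(ctx context.Context, galleryFilter *models.GalleryFilterType, findFilter *models.FindFilterType) (int, error) {
    return f.n, nil
}

func main() {
    n, _ := gallery.CountByTagID(context.Background(), fakeCounter{n: 7}, 42)
    fmt.Println(n) // prints 7
}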
now, so grab a mutex on the checksum - done := make(chan struct{}) - scanner.MutexManager.Claim(mutexType, scanned.New.Checksum, done) - - if err := scanner.TxnManager.WithTxn(ctx, func(r models.Repository) error { - // free the mutex once transaction is complete - defer close(done) - - // ensure no clashes of hashes - if scanned.New.Checksum != "" && scanned.Old.Checksum != scanned.New.Checksum { - dupe, _ := r.Gallery().FindByChecksum(retGallery.Checksum) - if dupe != nil { - return fmt.Errorf("MD5 for file %s is the same as that of %s", path, dupe.Path.String) - } - } - - retGallery, err = r.Gallery().Update(*retGallery) + if len(existing) > 0 { + if err := h.associateExisting(ctx, existing, f); err != nil { return err - }); err != nil { - return nil, false, err } - - scanner.PluginCache.ExecutePostHooks(ctx, retGallery.ID, plugin.GalleryUpdatePost, nil, nil) - } - - return -} - -func (scanner *Scanner) ScanNew(ctx context.Context, file file.SourceFile) (retGallery *models.Gallery, scanImages bool, err error) { - scanned, err := scanner.Scanner.ScanNew(file) - if err != nil { - return nil, false, err - } - - path := file.Path() - checksum := scanned.Checksum - isNewGallery := false - isUpdatedGallery := false - var g *models.Gallery - - // grab a mutex on the checksum - done := make(chan struct{}) - scanner.MutexManager.Claim(mutexType, checksum, done) - defer close(done) - - if err := scanner.TxnManager.WithTxn(ctx, func(r models.Repository) error { - qb := r.Gallery() - - g, _ = qb.FindByChecksum(checksum) - if g != nil { - exists, _ := fsutil.FileExists(g.Path.String) - if !scanner.CaseSensitiveFs { - // #1426 - if file exists but is a case-insensitive match for the - // original filename, then treat it as a move - if exists && strings.EqualFold(path, g.Path.String) { - exists = false - } - } - - if exists { - logger.Infof("%s already exists. Duplicate of %s ", path, g.Path.String) - } else { - logger.Infof("%s already exists. Updating path...", path) - g.Path = sql.NullString{ - String: path, - Valid: true, - } - g, err = qb.Update(*g) - if err != nil { - return err - } - - isUpdatedGallery = true - } - } else if scanner.hasImages(path) { // don't create gallery if it has no images - currentTime := time.Now() - - g = &models.Gallery{ - Zip: true, - Title: sql.NullString{ - String: fsutil.GetNameFromPath(path, scanner.StripFileExtension), - Valid: true, - }, - CreatedAt: models.SQLiteTimestamp{Timestamp: currentTime}, - UpdatedAt: models.SQLiteTimestamp{Timestamp: currentTime}, - } - - g.SetFile(*scanned) - - // only warn when creating the gallery - ok, err := isZipFileUncompressed(path) - if err == nil && !ok { - logger.Warnf("%s is using above store (0) level compression.", path) - } - - logger.Infof("%s doesn't exist. 
Creating new item...", path) - g, err = qb.Create(*g) - if err != nil { - return err - } - - scanImages = true - isNewGallery = true - } - - return nil - }); err != nil { - return nil, false, err - } - - if isNewGallery { - scanner.PluginCache.ExecutePostHooks(ctx, g.ID, plugin.GalleryCreatePost, nil, nil) - } else if isUpdatedGallery { - scanner.PluginCache.ExecutePostHooks(ctx, g.ID, plugin.GalleryUpdatePost, nil, nil) - } - - // Also scan images if zip file has been moved (ie updated) as the image paths are no longer valid - scanImages = isNewGallery || isUpdatedGallery - retGallery = g - - return -} - -// IsZipFileUnmcompressed returns true if zip file in path is using 0 compression level -func isZipFileUncompressed(path string) (bool, error) { - r, err := zip.OpenReader(path) - if err != nil { - fmt.Printf("Error reading zip file %s: %s\n", path, err) - return false, err } else { - defer r.Close() - for _, f := range r.File { - if f.FileInfo().IsDir() { // skip dirs, they always get store level compression - continue + // create a new gallery + now := time.Now() + newGallery := &models.Gallery{ + CreatedAt: now, + UpdatedAt: now, + } + + logger.Infof("%s doesn't exist. Creating new gallery...", f.Base().Path) + + if err := h.CreatorUpdater.Create(ctx, newGallery, []file.ID{baseFile.ID}); err != nil { + return fmt.Errorf("creating new gallery: %w", err) + } + + h.PluginCache.ExecutePostHooks(ctx, newGallery.ID, plugin.GalleryCreatePost, nil, nil) + + existing = []*models.Gallery{newGallery} + } + + if err := h.associateScene(ctx, existing, f); err != nil { + return err + } + + return nil +} + +func (h *ScanHandler) associateExisting(ctx context.Context, existing []*models.Gallery, f file.File) error { + for _, i := range existing { + if err := i.LoadFiles(ctx, h.CreatorUpdater); err != nil { + return err + } + + found := false + for _, sf := range i.Files.List() { + if sf.Base().ID == f.Base().ID { + found = true + break } - return f.Method == 0, nil // check compression level of first actual file } + + if !found { + logger.Infof("Adding %s to gallery %s", f.Base().Path, i.DisplayName()) + + if err := h.CreatorUpdater.AddFileID(ctx, i.ID, f.Base().ID); err != nil { + return fmt.Errorf("adding file to gallery: %w", err) + } + } + } - return false, nil + + return nil } -func (scanner *Scanner) isImage(pathname string) bool { - return fsutil.MatchExtension(pathname, scanner.ImageExtensions) -} +func (h *ScanHandler) associateScene(ctx context.Context, existing []*models.Gallery, f file.File) error { + galleryIDs := make([]int, len(existing)) + for i, g := range existing { + galleryIDs[i] = g.ID + } -func (scanner *Scanner) hasImages(path string) bool { - readCloser, err := zip.OpenReader(path) + path := f.Base().Path + withoutExt := strings.TrimSuffix(path, filepath.Ext(path)) + + // find scenes with a file that matches + scenes, err := h.SceneFinderUpdater.FindByPath(ctx, withoutExt) if err != nil { - logger.Warnf("Error while walking gallery zip: %v", err) - return false - } - defer readCloser.Close() - - for _, file := range readCloser.File { - if file.FileInfo().IsDir() { - continue - } - - if strings.Contains(file.Name, "__MACOSX") { - continue - } - - if !scanner.isImage(file.Name) { - continue - } - - return true + return err } - return false + for _, scene := range scenes { + // found related Scene + if err := h.SceneFinderUpdater.AddGalleryIDs(ctx, scene.ID, galleryIDs); err != nil { + return err + } + } + + return nil } diff --git a/pkg/gallery/service.go 
b/pkg/gallery/service.go new file mode 100644 index 000000000..7d0fb3240 --- /dev/null +++ b/pkg/gallery/service.go @@ -0,0 +1,38 @@ +package gallery + +import ( + "context" + + "github.com/stashapp/stash/pkg/file" + "github.com/stashapp/stash/pkg/image" + "github.com/stashapp/stash/pkg/models" +) + +type FinderByFile interface { + FindByFileID(ctx context.Context, fileID file.ID) ([]*models.Gallery, error) +} + +type Repository interface { + FinderByFile + Destroy(ctx context.Context, id int) error + models.FileLoader +} + +type ImageFinder interface { + FindByFolderID(ctx context.Context, folder file.FolderID) ([]*models.Image, error) + FindByZipFileID(ctx context.Context, zipFileID file.ID) ([]*models.Image, error) + models.GalleryIDLoader +} + +type ImageService interface { + Destroy(ctx context.Context, i *models.Image, fileDeleter *image.FileDeleter, deleteGenerated, deleteFile bool) error + DestroyZipImages(ctx context.Context, zipFile file.File, fileDeleter *image.FileDeleter, deleteGenerated bool) ([]*models.Image, error) +} + +type Service struct { + Repository Repository + ImageFinder ImageFinder + ImageService ImageService + File file.Store + Folder file.FolderStore +} diff --git a/pkg/gallery/update.go b/pkg/gallery/update.go index 4c16793ca..d881514ee 100644 --- a/pkg/gallery/update.go +++ b/pkg/gallery/update.go @@ -1,63 +1,47 @@ package gallery import ( + "context" + "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/sliceutil/intslice" ) -func UpdateFileModTime(qb models.GalleryWriter, id int, modTime models.NullSQLiteTimestamp) (*models.Gallery, error) { - return qb.UpdatePartial(models.GalleryPartial{ - ID: id, - FileModTime: &modTime, - }) +type PartialUpdater interface { + UpdatePartial(ctx context.Context, id int, updatedGallery models.GalleryPartial) (*models.Gallery, error) } -func AddImage(qb models.GalleryReaderWriter, galleryID int, imageID int) error { - imageIDs, err := qb.GetImageIDs(galleryID) +type ImageUpdater interface { + GetImageIDs(ctx context.Context, galleryID int) ([]int, error) + UpdateImages(ctx context.Context, galleryID int, imageIDs []int) error +} + +func AddImage(ctx context.Context, qb ImageUpdater, galleryID int, imageID int) error { + imageIDs, err := qb.GetImageIDs(ctx, galleryID) if err != nil { return err } imageIDs = intslice.IntAppendUnique(imageIDs, imageID) - return qb.UpdateImages(galleryID, imageIDs) + return qb.UpdateImages(ctx, galleryID, imageIDs) } -func AddPerformer(qb models.GalleryReaderWriter, id int, performerID int) (bool, error) { - performerIDs, err := qb.GetPerformerIDs(id) - if err != nil { - return false, err - } - - oldLen := len(performerIDs) - performerIDs = intslice.IntAppendUnique(performerIDs, performerID) - - if len(performerIDs) != oldLen { - if err := qb.UpdatePerformers(id, performerIDs); err != nil { - return false, err - } - - return true, nil - } - - return false, nil +func AddPerformer(ctx context.Context, qb PartialUpdater, o *models.Gallery, performerID int) error { + _, err := qb.UpdatePartial(ctx, o.ID, models.GalleryPartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: []int{performerID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }) + return err } -func AddTag(qb models.GalleryReaderWriter, id int, tagID int) (bool, error) { - tagIDs, err := qb.GetTagIDs(id) - if err != nil { - return false, err - } - - oldLen := len(tagIDs) - tagIDs = intslice.IntAppendUnique(tagIDs, tagID) - - if len(tagIDs) != oldLen { - if err := qb.UpdateTags(id, tagIDs); err != nil { - return false, err 
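AddPerformer (above) and AddTag (below) now state the desired change declaratively instead of doing a read-modify-write round trip through GetPerformerIDs/GetTagIDs. Presumably several relationships can be batched into a single UpdatePartial call; a sketch (the helper name is illustrative):

package example

import (
    "context"

    "github.com/stashapp/stash/pkg/gallery"
    "github.com/stashapp/stash/pkg/models"
)

// addPerformerAndTag applies one declarative partial update touching two
// relationships at once; qb is any gallery.PartialUpdater implementation.
func addPerformerAndTag(ctx context.Context, qb gallery.PartialUpdater, galleryID, performerID, tagID int) error {
    _, err := qb.UpdatePartial(ctx, galleryID, models.GalleryPartial{
        PerformerIDs: &models.UpdateIDs{
            IDs:  []int{performerID},
            Mode: models.RelationshipUpdateModeAdd,
        },
        TagIDs: &models.UpdateIDs{
            IDs:  []int{tagID},
            Mode: models.RelationshipUpdateModeAdd,
        },
    })
    return err
}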
- } - - return true, nil - } - - return false, nil +func AddTag(ctx context.Context, qb PartialUpdater, o *models.Gallery, tagID int) error { + _, err := qb.UpdatePartial(ctx, o.ID, models.GalleryPartial{ + TagIDs: &models.UpdateIDs{ + IDs: []int{tagID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }) + return err } diff --git a/pkg/hash/videophash/phash.go b/pkg/hash/videophash/phash.go index 8e81d894e..8438d9553 100644 --- a/pkg/hash/videophash/phash.go +++ b/pkg/hash/videophash/phash.go @@ -13,6 +13,7 @@ import ( "github.com/stashapp/stash/pkg/ffmpeg" "github.com/stashapp/stash/pkg/ffmpeg/transcoder" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/logger" ) @@ -22,7 +23,7 @@ const ( rows = 5 ) -func Generate(encoder ffmpeg.FFMpeg, videoFile *ffmpeg.VideoFile) (*uint64, error) { +func Generate(encoder ffmpeg.FFMpeg, videoFile *file.VideoFile) (*uint64, error) { sprite, err := generateSprite(encoder, videoFile) if err != nil { return nil, err @@ -75,7 +76,7 @@ func combineImages(images []image.Image) image.Image { return montage } -func generateSprite(encoder ffmpeg.FFMpeg, videoFile *ffmpeg.VideoFile) (image.Image, error) { +func generateSprite(encoder ffmpeg.FFMpeg, videoFile *file.VideoFile) (image.Image, error) { logger.Infof("[generator] generating phash sprite for %s", videoFile.Path) // Generate sprite image offset by 5% on each end to avoid intro/outros diff --git a/pkg/image/delete.go b/pkg/image/delete.go index 35ab3704b..b61e77045 100644 --- a/pkg/image/delete.go +++ b/pkg/image/delete.go @@ -1,6 +1,8 @@ package image import ( + "context" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/fsutil" "github.com/stashapp/stash/pkg/models" @@ -8,12 +10,12 @@ import ( ) type Destroyer interface { - Destroy(id int) error + Destroy(ctx context.Context, id int) error } // FileDeleter is an extension of file.Deleter that handles deletion of image files. type FileDeleter struct { - file.Deleter + *file.Deleter Paths *paths.Paths } @@ -30,10 +32,40 @@ func (d *FileDeleter) MarkGeneratedFiles(image *models.Image) error { } // Destroy destroys an image, optionally marking the file and generated files for deletion. -func Destroy(i *models.Image, destroyer Destroyer, fileDeleter *FileDeleter, deleteGenerated, deleteFile bool) error { - // don't try to delete if the image is in a zip file - if deleteFile && !file.IsZipPath(i.Path) { - if err := fileDeleter.Files([]string{i.Path}); err != nil { +func (s *Service) Destroy(ctx context.Context, i *models.Image, fileDeleter *FileDeleter, deleteGenerated, deleteFile bool) error { + return s.destroyImage(ctx, i, fileDeleter, deleteGenerated, deleteFile) +} + +// DestroyZipImages destroys all images in zip, optionally marking the files and generated files for deletion. +// Returns a slice of images that were destroyed. 
+func (s *Service) DestroyZipImages(ctx context.Context, zipFile file.File, fileDeleter *FileDeleter, deleteGenerated bool) ([]*models.Image, error) { + var imgsDestroyed []*models.Image + + imgs, err := s.Repository.FindByZipFileID(ctx, zipFile.Base().ID) + if err != nil { + return nil, err + } + + for _, img := range imgs { + if err := img.LoadFiles(ctx, s.Repository); err != nil { + return nil, err + } + + const deleteFileInZip = false + if err := s.destroyImage(ctx, img, fileDeleter, deleteGenerated, deleteFileInZip); err != nil { + return nil, err + } + + imgsDestroyed = append(imgsDestroyed, img) + } + + return imgsDestroyed, nil +} + +// Destroy destroys an image, optionally marking the file and generated files for deletion. +func (s *Service) destroyImage(ctx context.Context, i *models.Image, fileDeleter *FileDeleter, deleteGenerated, deleteFile bool) error { + if deleteFile { + if err := s.deleteFiles(ctx, i, fileDeleter); err != nil { return err } } @@ -44,5 +76,35 @@ func Destroy(i *models.Image, destroyer Destroyer, fileDeleter *FileDeleter, del } } - return destroyer.Destroy(i.ID) + return s.Repository.Destroy(ctx, i.ID) +} + +// deleteFiles deletes files for the image from the database and file system, if they are not in use by other images +func (s *Service) deleteFiles(ctx context.Context, i *models.Image, fileDeleter *FileDeleter) error { + if err := i.LoadFiles(ctx, s.Repository); err != nil { + return err + } + + for _, f := range i.Files.List() { + // only delete files where there is no other associated image + otherImages, err := s.Repository.FindByFileID(ctx, f.ID) + if err != nil { + return err + } + + if len(otherImages) > 1 { + // other image associated, don't remove + continue + } + + // don't delete files in zip archives + const deleteFile = true + if f.ZipFileID == nil { + if err := file.Destroy(ctx, s.File, f, fileDeleter.Deleter, deleteFile); err != nil { + return err + } + } + } + + return nil } diff --git a/pkg/image/export.go b/pkg/image/export.go index 3938a39bf..afb811a80 100644 --- a/pkg/image/export.go +++ b/pkg/image/export.go @@ -1,9 +1,12 @@ package image import ( + "context" + "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models/json" "github.com/stashapp/stash/pkg/models/jsonschema" + "github.com/stashapp/stash/pkg/studio" ) // ToBasicJSON converts a image object into its JSON object equivalent. It @@ -11,54 +14,43 @@ import ( // of cover image. 
func ToBasicJSON(image *models.Image) *jsonschema.Image { newImageJSON := jsonschema.Image{ - Checksum: image.Checksum, - CreatedAt: json.JSONTime{Time: image.CreatedAt.Timestamp}, - UpdatedAt: json.JSONTime{Time: image.UpdatedAt.Timestamp}, + Title: image.Title, + CreatedAt: json.JSONTime{Time: image.CreatedAt}, + UpdatedAt: json.JSONTime{Time: image.UpdatedAt}, } - if image.Title.Valid { - newImageJSON.Title = image.Title.String - } - - if image.Rating.Valid { - newImageJSON.Rating = int(image.Rating.Int64) + if image.Rating != nil { + newImageJSON.Rating = *image.Rating } newImageJSON.Organized = image.Organized newImageJSON.OCounter = image.OCounter - newImageJSON.File = getImageFileJSON(image) + for _, f := range image.Files.List() { + newImageJSON.Files = append(newImageJSON.Files, f.Base().Path) + } return &newImageJSON } -func getImageFileJSON(image *models.Image) *jsonschema.ImageFile { - ret := &jsonschema.ImageFile{} +// func getImageFileJSON(image *models.Image) *jsonschema.ImageFile { +// ret := &jsonschema.ImageFile{} - if image.FileModTime.Valid { - ret.ModTime = json.JSONTime{Time: image.FileModTime.Timestamp} - } +// f := image.PrimaryFile() - if image.Size.Valid { - ret.Size = int(image.Size.Int64) - } +// ret.ModTime = json.JSONTime{Time: f.ModTime} +// ret.Size = f.Size +// ret.Width = f.Width +// ret.Height = f.Height - if image.Width.Valid { - ret.Width = int(image.Width.Int64) - } - - if image.Height.Valid { - ret.Height = int(image.Height.Int64) - } - - return ret -} +// return ret +// } // GetStudioName returns the name of the provided image's studio. It returns an // empty string if there is no studio assigned to the image. -func GetStudioName(reader models.StudioReader, image *models.Image) (string, error) { - if image.StudioID.Valid { - studio, err := reader.Find(int(image.StudioID.Int64)) +func GetStudioName(ctx context.Context, reader studio.Finder, image *models.Image) (string, error) { + if image.StudioID != nil { + studio, err := reader.Find(ctx, *image.StudioID) if err != nil { return "", err } diff --git a/pkg/image/export_test.go b/pkg/image/export_test.go index 0a449c443..6350b7302 100644 --- a/pkg/image/export_test.go +++ b/pkg/image/export_test.go @@ -3,6 +3,7 @@ package image import ( "errors" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models/json" "github.com/stashapp/stash/pkg/models/jsonschema" @@ -15,43 +16,22 @@ import ( const ( imageID = 1 - // noImageID = 2 - errImageID = 3 studioID = 4 missingStudioID = 5 errStudioID = 6 - - // noGalleryID = 7 - // errGalleryID = 8 - - // noTagsID = 11 - errTagsID = 12 - - // noMoviesID = 13 - // errMoviesID = 14 - // errFindMovieID = 15 - - // noMarkersID = 16 - // errMarkersID = 17 - // errFindPrimaryTagID = 18 - // errFindByMarkerID = 19 ) -const ( - checksum = "checksum" +var ( title = "title" rating = 5 organized = true ocounter = 2 - size = 123 - width = 100 - height = 100 ) const ( studioName = "studioName" - // galleryChecksum = "galleryChecksum" + path = "path" ) var ( @@ -61,36 +41,30 @@ var ( func createFullImage(id int) models.Image { return models.Image{ - ID: id, - Title: models.NullString(title), - Checksum: checksum, - Height: models.NullInt64(height), + ID: id, + Files: models.NewRelatedImageFiles([]*file.ImageFile{ + { + BaseFile: &file.BaseFile{ + Path: path, + }, + }, + }), + Title: title, OCounter: ocounter, - Rating: models.NullInt64(rating), - Size: models.NullInt64(int64(size)), + Rating: &rating, Organized: organized, - 
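Images follow the same multi-file model as galleries: the exported JSON now lists file paths instead of a single embedded file object. A sketch reusing the constructors from the test below (values hypothetical):

package main

import (
    "fmt"

    "github.com/stashapp/stash/pkg/file"
    "github.com/stashapp/stash/pkg/image"
    "github.com/stashapp/stash/pkg/models"
)

func main() {
    i := &models.Image{
        Title: "cover",
        Files: models.NewRelatedImageFiles([]*file.ImageFile{
            {BaseFile: &file.BaseFile{Path: "/photos/a.jpg"}},
            {BaseFile: &file.BaseFile{Path: "/photos/a-edit.jpg"}},
        }),
    }

    j := image.ToBasicJSON(i)
    fmt.Println(j.Files) // [/photos/a.jpg /photos/a-edit.jpg]
}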
Width: models.NullInt64(width), - CreatedAt: models.SQLiteTimestamp{ - Timestamp: createTime, - }, - UpdatedAt: models.SQLiteTimestamp{ - Timestamp: updateTime, - }, + CreatedAt: createTime, + UpdatedAt: updateTime, } } func createFullJSONImage() *jsonschema.Image { return &jsonschema.Image{ Title: title, - Checksum: checksum, OCounter: ocounter, Rating: rating, Organized: organized, - File: &jsonschema.ImageFile{ - Height: height, - Size: size, - Width: width, - }, + Files: []string{path}, CreatedAt: json.JSONTime{ Time: createTime, }, @@ -123,7 +97,7 @@ func TestToJSON(t *testing.T) { func createStudioImage(studioID int) models.Image { return models.Image{ - StudioID: models.NullInt64(int64(studioID)), + StudioID: &studioID, } } @@ -156,15 +130,15 @@ func TestGetStudioName(t *testing.T) { studioErr := errors.New("error getting image") - mockStudioReader.On("Find", studioID).Return(&models.Studio{ + mockStudioReader.On("Find", testCtx, studioID).Return(&models.Studio{ Name: models.NullString(studioName), }, nil).Once() - mockStudioReader.On("Find", missingStudioID).Return(nil, nil).Once() - mockStudioReader.On("Find", errStudioID).Return(nil, studioErr).Once() + mockStudioReader.On("Find", testCtx, missingStudioID).Return(nil, nil).Once() + mockStudioReader.On("Find", testCtx, errStudioID).Return(nil, studioErr).Once() for i, s := range getStudioScenarios { image := s.input - json, err := GetStudioName(mockStudioReader, &image) + json, err := GetStudioName(testCtx, mockStudioReader, &image) switch { case !s.err && err != nil: @@ -178,48 +152,3 @@ func TestGetStudioName(t *testing.T) { mockStudioReader.AssertExpectations(t) } - -// var getGalleryChecksumScenarios = []stringTestScenario{ -// { -// createEmptyImage(imageID), -// galleryChecksum, -// false, -// }, -// { -// createEmptyImage(noGalleryID), -// "", -// false, -// }, -// { -// createEmptyImage(errGalleryID), -// "", -// true, -// }, -// } - -// func TestGetGalleryChecksum(t *testing.T) { -// mockGalleryReader := &mocks.GalleryReaderWriter{} - -// galleryErr := errors.New("error getting gallery") - -// mockGalleryReader.On("FindByImageID", imageID).Return(&models.Gallery{ -// Checksum: galleryChecksum, -// }, nil).Once() -// mockGalleryReader.On("FindByImageID", noGalleryID).Return(nil, nil).Once() -// mockGalleryReader.On("FindByImageID", errGalleryID).Return(nil, galleryErr).Once() - -// for i, s := range getGalleryChecksumScenarios { -// image := s.input -// json, err := GetGalleryChecksum(mockGalleryReader, &image) - -// if !s.err && err != nil { -// t.Errorf("[%d] unexpected error: %s", i, err.Error()) -// } else if s.err && err == nil { -// t.Errorf("[%d] expected error not returned", i) -// } else { -// assert.Equal(t, s.expected, json, "[%d]", i) -// } -// } - -// mockGalleryReader.AssertExpectations(t) -// } diff --git a/pkg/image/image.go b/pkg/image/image.go index 668a65513..00c8b3be2 100644 --- a/pkg/image/image.go +++ b/pkg/image/image.go @@ -1,250 +1,12 @@ package image import ( - "archive/zip" - "database/sql" - "fmt" - "image" - "io" - "net/http" - "os" - "path/filepath" "strings" - "time" - "github.com/stashapp/stash/pkg/file" - "github.com/stashapp/stash/pkg/fsutil" - "github.com/stashapp/stash/pkg/hash/md5" - "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" _ "golang.org/x/image/webp" ) -func GetSourceImage(i *models.Image) (image.Image, error) { - f, err := openSourceImage(i.Path) - if err != nil { - return nil, err - } - defer f.Close() - - srcImage, _, err := image.Decode(f) - if 
err != nil { - return nil, err - } - - return srcImage, nil -} - -func DecodeSourceImage(i *models.Image) (*image.Config, *string, error) { - f, err := openSourceImage(i.Path) - if err != nil { - return nil, nil, err - } - defer f.Close() - - config, format, err := image.DecodeConfig(f) - - return &config, &format, err -} - -func CalculateMD5(path string) (string, error) { - f, err := openSourceImage(path) - if err != nil { - return "", err - } - defer f.Close() - - return md5.FromReader(f) -} - -func FileExists(path string) bool { - f, err := openSourceImage(path) - if err != nil { - return false - } - defer f.Close() - - return true -} - -type imageReadCloser struct { - src io.ReadCloser - zrc *zip.ReadCloser -} - -func (i *imageReadCloser) Read(p []byte) (n int, err error) { - return i.src.Read(p) -} - -func (i *imageReadCloser) Close() error { - err := i.src.Close() - var err2 error - if i.zrc != nil { - err2 = i.zrc.Close() - } - - if err != nil { - return err - } - return err2 -} - -func openSourceImage(path string) (io.ReadCloser, error) { - // may need to read from a zip file - zipFilename, filename := file.ZipFilePath(path) - if zipFilename != "" { - r, err := zip.OpenReader(zipFilename) - if err != nil { - return nil, err - } - - // defer closing of zip to the calling function, unless an error - // is returned, in which case it should be closed immediately - - // find the file matching the filename - for _, f := range r.File { - if f.Name == filename { - src, err := f.Open() - if err != nil { - r.Close() - return nil, err - } - return &imageReadCloser{ - src: src, - zrc: r, - }, nil - } - } - - r.Close() - return nil, fmt.Errorf("file with name '%s' not found in zip file '%s'", filename, zipFilename) - } - - return os.Open(filename) -} - -// GetFileDetails returns a pointer to an Image object with the -// width, height and size populated. -func GetFileDetails(path string) (*models.Image, error) { - i := &models.Image{ - Path: path, - } - - err := SetFileDetails(i) - if err != nil { - return nil, err - } - - return i, nil -} - -func SetFileDetails(i *models.Image) error { - f, err := stat(i.Path) - if err != nil { - return err - } - - config, _, err := DecodeSourceImage(i) - - if err == nil { - i.Width = sql.NullInt64{ - Int64: int64(config.Width), - Valid: true, - } - i.Height = sql.NullInt64{ - Int64: int64(config.Height), - Valid: true, - } - } - - i.Size = sql.NullInt64{ - Int64: int64(f.Size()), - Valid: true, - } - - return nil -} - -// GetFileModTime gets the file modification time, handling files in zip files. 
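// A hedged aside on the zip-aware readers being removed here: the pattern
// generalizes to any file-or-zip-member source. A minimal, self-contained
// sketch of the same idea (openMember and memberReadCloser are illustrative
// names, not part of this changeset):
package zipsource

import (
	"archive/zip"
	"fmt"
	"io"
	"os"
)

// memberReadCloser ties the lifetime of the zip archive to the member
// reader, as the removed imageReadCloser wrapper did.
type memberReadCloser struct {
	io.ReadCloser
	zrc *zip.ReadCloser
}

func (m *memberReadCloser) Close() error {
	err := m.ReadCloser.Close()
	if zerr := m.zrc.Close(); err == nil {
		err = zerr
	}
	return err
}

// openMember opens name inside zipPath, or the plain file when zipPath is empty.
func openMember(zipPath, name string) (io.ReadCloser, error) {
	if zipPath == "" {
		return os.Open(name)
	}
	r, err := zip.OpenReader(zipPath)
	if err != nil {
		return nil, err
	}
	for _, f := range r.File {
		if f.Name == name {
			src, err := f.Open()
			if err != nil {
				r.Close()
				return nil, err
			}
			return &memberReadCloser{ReadCloser: src, zrc: r}, nil
		}
	}
	r.Close()
	return nil, fmt.Errorf("file '%s' not found in zip file '%s'", name, zipPath)
}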
-func GetFileModTime(path string) (time.Time, error) { - fi, err := stat(path) - if err != nil { - return time.Time{}, fmt.Errorf("error performing stat on %s: %s", path, err.Error()) - } - - ret := fi.ModTime() - // truncate to seconds, since we don't store beyond that in the database - ret = ret.Truncate(time.Second) - - return ret, nil -} - -func stat(path string) (os.FileInfo, error) { - // may need to read from a zip file - zipFilename, filename := file.ZipFilePath(path) - if zipFilename != "" { - r, err := zip.OpenReader(zipFilename) - if err != nil { - return nil, err - } - defer r.Close() - - // find the file matching the filename - for _, f := range r.File { - if f.Name == filename { - return f.FileInfo(), nil - } - } - - return nil, fmt.Errorf("file with name '%s' not found in zip file '%s'", filename, zipFilename) - } - - return os.Stat(filename) -} - -func Serve(w http.ResponseWriter, r *http.Request, path string) { - zipFilename, _ := file.ZipFilePath(path) - w.Header().Add("Cache-Control", "max-age=604800000") // 1 Week - if zipFilename == "" { - http.ServeFile(w, r, path) - } else { - rc, err := openSourceImage(path) - if err != nil { - // assume not found - http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) - return - } - defer rc.Close() - - data, err := io.ReadAll(rc) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - if k, err := w.Write(data); err != nil { - logger.Warnf("failure while serving image (wrote %v bytes out of %v): %v", k, len(data), err) - } - } -} - func IsCover(img *models.Image) bool { - _, fn := file.ZipFilePath(img.Path) - return strings.HasSuffix(fn, "cover.jpg") -} - -func GetTitle(s *models.Image) string { - if s.Title.String != "" { - return s.Title.String - } - - _, fn := file.ZipFilePath(s.Path) - return filepath.Base(fn) -} - -// GetFilename gets the base name of the image file -// If stripExt is set the file extension is omitted from the name -func GetFilename(s *models.Image, stripExt bool) string { - _, fn := file.ZipFilePath(s.Path) - return fsutil.GetNameFromPath(fn, stripExt) + return strings.HasSuffix(img.Path, "cover.jpg") } diff --git a/pkg/image/import.go b/pkg/image/import.go index 78b60c4b1..7c19a5629 100644 --- a/pkg/image/import.go +++ b/pkg/image/import.go @@ -1,48 +1,63 @@ package image import ( - "database/sql" + "context" "fmt" "strings" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models/jsonschema" + "github.com/stashapp/stash/pkg/performer" "github.com/stashapp/stash/pkg/sliceutil/stringslice" + "github.com/stashapp/stash/pkg/studio" + "github.com/stashapp/stash/pkg/tag" ) -type Importer struct { - ReaderWriter models.ImageReaderWriter - StudioWriter models.StudioReaderWriter - GalleryWriter models.GalleryReaderWriter - PerformerWriter models.PerformerReaderWriter - TagWriter models.TagReaderWriter - Input jsonschema.Image - Path string - MissingRefBehaviour models.ImportMissingRefEnum - - ID int - image models.Image - galleries []*models.Gallery - performers []*models.Performer - tags []*models.Tag +type GalleryFinder interface { + FindByPath(ctx context.Context, p string) ([]*models.Gallery, error) + FindUserGalleryByTitle(ctx context.Context, title string) ([]*models.Gallery, error) } -func (i *Importer) PreImport() error { +type FullCreatorUpdater interface { + FinderCreatorUpdater + Update(ctx context.Context, updatedImage *models.Image) error +} + +type Importer struct { + ReaderWriter 
FullCreatorUpdater + FileFinder file.Getter + StudioWriter studio.NameFinderCreator + GalleryFinder GalleryFinder + PerformerWriter performer.NameFinderCreator + TagWriter tag.NameFinderCreator + Input jsonschema.Image + MissingRefBehaviour models.ImportMissingRefEnum + + ID int + image models.Image +} + +func (i *Importer) PreImport(ctx context.Context) error { i.image = i.imageJSONToImage(i.Input) - if err := i.populateStudio(); err != nil { + if err := i.populateFiles(ctx); err != nil { return err } - if err := i.populateGalleries(); err != nil { + if err := i.populateStudio(ctx); err != nil { return err } - if err := i.populatePerformers(); err != nil { + if err := i.populateGalleries(ctx); err != nil { return err } - if err := i.populateTags(); err != nil { + if err := i.populatePerformers(ctx); err != nil { + return err + } + + if err := i.populateTags(ctx); err != nil { return err } @@ -51,40 +66,54 @@ func (i *Importer) PreImport() error { func (i *Importer) imageJSONToImage(imageJSON jsonschema.Image) models.Image { newImage := models.Image{ - Checksum: imageJSON.Checksum, - Path: i.Path, + // Checksum: imageJSON.Checksum, + // Path: i.Path, + PerformerIDs: models.NewRelatedIDs([]int{}), + TagIDs: models.NewRelatedIDs([]int{}), + GalleryIDs: models.NewRelatedIDs([]int{}), + + Title: imageJSON.Title, + Organized: imageJSON.Organized, + OCounter: imageJSON.OCounter, + CreatedAt: imageJSON.CreatedAt.GetTime(), + UpdatedAt: imageJSON.UpdatedAt.GetTime(), } if imageJSON.Title != "" { - newImage.Title = sql.NullString{String: imageJSON.Title, Valid: true} + newImage.Title = imageJSON.Title } if imageJSON.Rating != 0 { - newImage.Rating = sql.NullInt64{Int64: int64(imageJSON.Rating), Valid: true} - } - - newImage.Organized = imageJSON.Organized - newImage.OCounter = imageJSON.OCounter - newImage.CreatedAt = models.SQLiteTimestamp{Timestamp: imageJSON.CreatedAt.GetTime()} - newImage.UpdatedAt = models.SQLiteTimestamp{Timestamp: imageJSON.UpdatedAt.GetTime()} - - if imageJSON.File != nil { - if imageJSON.File.Size != 0 { - newImage.Size = sql.NullInt64{Int64: int64(imageJSON.File.Size), Valid: true} - } - if imageJSON.File.Width != 0 { - newImage.Width = sql.NullInt64{Int64: int64(imageJSON.File.Width), Valid: true} - } - if imageJSON.File.Height != 0 { - newImage.Height = sql.NullInt64{Int64: int64(imageJSON.File.Height), Valid: true} - } + newImage.Rating = &imageJSON.Rating } return newImage } -func (i *Importer) populateStudio() error { +func (i *Importer) populateFiles(ctx context.Context) error { + files := make([]*file.ImageFile, 0) + + for _, ref := range i.Input.Files { + path := ref + f, err := i.FileFinder.FindByPath(ctx, path) + if err != nil { + return fmt.Errorf("error finding file: %w", err) + } + + if f == nil { + return fmt.Errorf("image file '%s' not found", path) + } else { + files = append(files, f.(*file.ImageFile)) + } + } + + i.image.Files = models.NewRelatedImageFiles(files) + + return nil +} + +func (i *Importer) populateStudio(ctx context.Context) error { if i.Input.Studio != "" { - studio, err := i.StudioWriter.FindByName(i.Input.Studio, false) + studio, err := i.StudioWriter.FindByName(ctx, i.Input.Studio, false) if err != nil { return fmt.Errorf("error finding studio by name: %v", err) } @@ -99,27 +128,24 @@ func (i *Importer) populateStudio() error { } if i.MissingRefBehaviour == models.ImportMissingRefEnumCreate { - studioID, err := i.createStudio(i.Input.Studio) + studioID, err := i.createStudio(ctx, i.Input.Studio) if err != nil { return err } - 
i.image.StudioID = sql.NullInt64{ - Int64: int64(studioID), - Valid: true, - } + i.image.StudioID = &studioID } } else { - i.image.StudioID = sql.NullInt64{Int64: int64(studio.ID), Valid: true} + i.image.StudioID = &studio.ID } } return nil } -func (i *Importer) createStudio(name string) (int, error) { +func (i *Importer) createStudio(ctx context.Context, name string) (int, error) { newStudio := *models.NewStudio(name) - created, err := i.StudioWriter.Create(newStudio) + created, err := i.StudioWriter.Create(ctx, newStudio) if err != nil { return 0, err } @@ -127,16 +153,45 @@ func (i *Importer) createStudio(name string) (int, error) { return created.ID, nil } -func (i *Importer) populateGalleries() error { - for _, checksum := range i.Input.Galleries { - gallery, err := i.GalleryWriter.FindByChecksum(checksum) +func (i *Importer) locateGallery(ctx context.Context, ref jsonschema.GalleryRef) (*models.Gallery, error) { + var galleries []*models.Gallery + var err error + switch { + case ref.FolderPath != "": + galleries, err = i.GalleryFinder.FindByPath(ctx, ref.FolderPath) + case len(ref.ZipFiles) > 0: + for _, p := range ref.ZipFiles { + galleries, err = i.GalleryFinder.FindByPath(ctx, p) + if err != nil { + break + } + + if len(galleries) > 0 { + break + } + } + case ref.Title != "": + galleries, err = i.GalleryFinder.FindUserGalleryByTitle(ctx, ref.Title) + } + + var ret *models.Gallery + if len(galleries) > 0 { + ret = galleries[0] + } + + return ret, err +} + +func (i *Importer) populateGalleries(ctx context.Context) error { + for _, ref := range i.Input.Galleries { + gallery, err := i.locateGallery(ctx, ref) if err != nil { return fmt.Errorf("error finding gallery: %v", err) } if gallery == nil { if i.MissingRefBehaviour == models.ImportMissingRefEnumFail { - return fmt.Errorf("image gallery '%s' not found", i.Input.Studio) + return fmt.Errorf("image gallery '%s' not found", ref.String()) } // we don't create galleries - just ignore @@ -144,17 +199,17 @@ func (i *Importer) populateGalleries() error { continue } } else { - i.galleries = append(i.galleries, gallery) + i.image.GalleryIDs.Add(gallery.ID) } } return nil } -func (i *Importer) populatePerformers() error { +func (i *Importer) populatePerformers(ctx context.Context) error { if len(i.Input.Performers) > 0 { names := i.Input.Performers - performers, err := i.PerformerWriter.FindByNames(names, false) + performers, err := i.PerformerWriter.FindByNames(ctx, names, false) if err != nil { return err } @@ -177,7 +232,7 @@ func (i *Importer) populatePerformers() error { } if i.MissingRefBehaviour == models.ImportMissingRefEnumCreate { - createdPerformers, err := i.createPerformers(missingPerformers) + createdPerformers, err := i.createPerformers(ctx, missingPerformers) if err != nil { return fmt.Errorf("error creating image performers: %v", err) } @@ -188,18 +243,20 @@ func (i *Importer) populatePerformers() error { // ignore if MissingRefBehaviour set to Ignore } - i.performers = performers + for _, p := range performers { + i.image.PerformerIDs.Add(p.ID) + } } return nil } -func (i *Importer) createPerformers(names []string) ([]*models.Performer, error) { +func (i *Importer) createPerformers(ctx context.Context, names []string) ([]*models.Performer, error) { var ret []*models.Performer for _, name := range names { newPerformer := *models.NewPerformer(name) - created, err := i.PerformerWriter.Create(newPerformer) + created, err := i.PerformerWriter.Create(ctx, newPerformer) if err != nil { return nil, err } @@ -210,93 +267,81 @@ func 
(i *Importer) createPerformers(names []string) ([]*models.Performer, error) return ret, nil } -func (i *Importer) populateTags() error { +func (i *Importer) populateTags(ctx context.Context) error { if len(i.Input.Tags) > 0 { - tags, err := importTags(i.TagWriter, i.Input.Tags, i.MissingRefBehaviour) + tags, err := importTags(ctx, i.TagWriter, i.Input.Tags, i.MissingRefBehaviour) if err != nil { return err } - i.tags = tags + for _, t := range tags { + i.image.TagIDs.Add(t.ID) + } } return nil } -func (i *Importer) PostImport(id int) error { - if len(i.galleries) > 0 { - var galleryIDs []int - for _, g := range i.galleries { - galleryIDs = append(galleryIDs, g.ID) - } - - if err := i.ReaderWriter.UpdateGalleries(id, galleryIDs); err != nil { - return fmt.Errorf("failed to associate galleries: %v", err) - } - } - - if len(i.performers) > 0 { - var performerIDs []int - for _, performer := range i.performers { - performerIDs = append(performerIDs, performer.ID) - } - - if err := i.ReaderWriter.UpdatePerformers(id, performerIDs); err != nil { - return fmt.Errorf("failed to associate performers: %v", err) - } - } - - if len(i.tags) > 0 { - var tagIDs []int - for _, t := range i.tags { - tagIDs = append(tagIDs, t.ID) - } - if err := i.ReaderWriter.UpdateTags(id, tagIDs); err != nil { - return fmt.Errorf("failed to associate tags: %v", err) - } - } - +func (i *Importer) PostImport(ctx context.Context, id int) error { return nil } func (i *Importer) Name() string { - return i.Path -} - -func (i *Importer) FindExistingID() (*int, error) { - var existing *models.Image - var err error - existing, err = i.ReaderWriter.FindByChecksum(i.Input.Checksum) - - if err != nil { - return nil, err + if i.Input.Title != "" { + return i.Input.Title } - if existing != nil { - id := existing.ID - return &id, nil + if len(i.Input.Files) > 0 { + return i.Input.Files[0] + } + + return "" +} + +func (i *Importer) FindExistingID(ctx context.Context) (*int, error) { + var existing []*models.Image + var err error + + for _, f := range i.image.Files.List() { + existing, err = i.ReaderWriter.FindByFileID(ctx, f.ID) + if err != nil { + return nil, err + } + + if len(existing) > 0 { + id := existing[0].ID + return &id, nil + } } return nil, nil } -func (i *Importer) Create() (*int, error) { - created, err := i.ReaderWriter.Create(i.image) +func (i *Importer) Create(ctx context.Context) (*int, error) { + var fileIDs []file.ID + for _, f := range i.image.Files.List() { + fileIDs = append(fileIDs, f.Base().ID) + } + + err := i.ReaderWriter.Create(ctx, &models.ImageCreateInput{ + Image: &i.image, + FileIDs: fileIDs, + }) if err != nil { return nil, fmt.Errorf("error creating image: %v", err) } - id := created.ID + id := i.image.ID i.ID = id return &id, nil } -func (i *Importer) Update(id int) error { +func (i *Importer) Update(ctx context.Context, id int) error { image := i.image image.ID = id i.ID = id - _, err := i.ReaderWriter.UpdateFull(image) + err := i.ReaderWriter.Update(ctx, &image) if err != nil { return fmt.Errorf("error updating existing image: %v", err) } @@ -304,8 +349,8 @@ func (i *Importer) Update(id int) error { return nil } -func importTags(tagWriter models.TagReaderWriter, names []string, missingRefBehaviour models.ImportMissingRefEnum) ([]*models.Tag, error) { - tags, err := tagWriter.FindByNames(names, false) +func importTags(ctx context.Context, tagWriter tag.NameFinderCreator, names []string, missingRefBehaviour models.ImportMissingRefEnum) ([]*models.Tag, error) { + tags, err := tagWriter.FindByNames(ctx, 
names, false) if err != nil { return nil, err } @@ -325,7 +370,7 @@ func importTags(tagWriter models.TagReaderWriter, names []string, missingRefBeha } if missingRefBehaviour == models.ImportMissingRefEnumCreate { - createdTags, err := createTags(tagWriter, missingTags) + createdTags, err := createTags(ctx, tagWriter, missingTags) if err != nil { return nil, fmt.Errorf("error creating tags: %v", err) } @@ -339,12 +384,12 @@ func importTags(tagWriter models.TagReaderWriter, names []string, missingRefBeha return tags, nil } -func createTags(tagWriter models.TagWriter, names []string) ([]*models.Tag, error) { +func createTags(ctx context.Context, tagWriter tag.NameFinderCreator, names []string) ([]*models.Tag, error) { var ret []*models.Tag for _, name := range names { newTag := *models.NewTag(name) - created, err := tagWriter.Create(newTag) + created, err := tagWriter.Create(ctx, newTag) if err != nil { return nil, err } diff --git a/pkg/image/import_test.go b/pkg/image/import_test.go index 156ec96d2..647815127 100644 --- a/pkg/image/import_test.go +++ b/pkg/image/import_test.go @@ -1,6 +1,7 @@ package image import ( + "context" "errors" "testing" @@ -11,27 +12,15 @@ import ( "github.com/stretchr/testify/mock" ) -const ( - path = "path" - - imageNameErr = "imageNameErr" - // existingImageName = "existingImageName" - - existingImageID = 100 +var ( existingStudioID = 101 - existingGalleryID = 102 existingPerformerID = 103 - // existingMovieID = 104 - existingTagID = 105 + existingTagID = 105 existingStudioName = "existingStudioName" existingStudioErr = "existingStudioErr" missingStudioName = "missingStudioName" - existingGalleryChecksum = "existingGalleryChecksum" - existingGalleryErr = "existingGalleryErr" - missingGalleryChecksum = "missingGalleryChecksum" - existingPerformerName = "existingPerformerName" existingPerformerErr = "existingPerformerErr" missingPerformerName = "missingPerformerName" @@ -39,29 +28,14 @@ const ( existingTagName = "existingTagName" existingTagErr = "existingTagErr" missingTagName = "missingTagName" - - errPerformersID = 200 - errGalleriesID = 201 - - missingChecksum = "missingChecksum" - errChecksum = "errChecksum" ) -func TestImporterName(t *testing.T) { - i := Importer{ - Path: path, - Input: jsonschema.Image{}, - } - - assert.Equal(t, path, i.Name()) -} +var testCtx = context.Background() func TestImporterPreImport(t *testing.T) { - i := Importer{ - Path: path, - } + i := Importer{} - err := i.PreImport() + err := i.PreImport(testCtx) assert.Nil(t, err) } @@ -70,23 +44,22 @@ func TestImporterPreImportWithStudio(t *testing.T) { i := Importer{ StudioWriter: studioReaderWriter, - Path: path, Input: jsonschema.Image{ Studio: existingStudioName, }, } - studioReaderWriter.On("FindByName", existingStudioName, false).Return(&models.Studio{ + studioReaderWriter.On("FindByName", testCtx, existingStudioName, false).Return(&models.Studio{ ID: existingStudioID, }, nil).Once() - studioReaderWriter.On("FindByName", existingStudioErr, false).Return(nil, errors.New("FindByName error")).Once() + studioReaderWriter.On("FindByName", testCtx, existingStudioErr, false).Return(nil, errors.New("FindByName error")).Once() - err := i.PreImport() + err := i.PreImport(testCtx) assert.Nil(t, err) - assert.Equal(t, int64(existingStudioID), i.image.StudioID.Int64) + assert.Equal(t, existingStudioID, *i.image.StudioID) i.Input.Studio = existingStudioErr - err = i.PreImport() + err = i.PreImport(testCtx) assert.NotNil(t, err) studioReaderWriter.AssertExpectations(t) @@ -96,7 +69,6 @@ func 
TestImporterPreImportWithMissingStudio(t *testing.T) { studioReaderWriter := &mocks.StudioReaderWriter{} i := Importer{ - Path: path, StudioWriter: studioReaderWriter, Input: jsonschema.Image{ Studio: missingStudioName, @@ -104,22 +76,22 @@ func TestImporterPreImportWithMissingStudio(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumFail, } - studioReaderWriter.On("FindByName", missingStudioName, false).Return(nil, nil).Times(3) - studioReaderWriter.On("Create", mock.AnythingOfType("models.Studio")).Return(&models.Studio{ + studioReaderWriter.On("FindByName", testCtx, missingStudioName, false).Return(nil, nil).Times(3) + studioReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Studio")).Return(&models.Studio{ ID: existingStudioID, }, nil) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumIgnore - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumCreate - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) - assert.Equal(t, int64(existingStudioID), i.image.StudioID.Int64) + assert.Equal(t, existingStudioID, *i.image.StudioID) studioReaderWriter.AssertExpectations(t) } @@ -129,90 +101,24 @@ func TestImporterPreImportWithMissingStudioCreateErr(t *testing.T) { i := Importer{ StudioWriter: studioReaderWriter, - Path: path, Input: jsonschema.Image{ Studio: missingStudioName, }, MissingRefBehaviour: models.ImportMissingRefEnumCreate, } - studioReaderWriter.On("FindByName", missingStudioName, false).Return(nil, nil).Once() - studioReaderWriter.On("Create", mock.AnythingOfType("models.Studio")).Return(nil, errors.New("Create error")) + studioReaderWriter.On("FindByName", testCtx, missingStudioName, false).Return(nil, nil).Once() + studioReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Studio")).Return(nil, errors.New("Create error")) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) } -func TestImporterPreImportWithGallery(t *testing.T) { - galleryReaderWriter := &mocks.GalleryReaderWriter{} - - i := Importer{ - GalleryWriter: galleryReaderWriter, - Path: path, - Input: jsonschema.Image{ - Galleries: []string{ - existingGalleryChecksum, - }, - }, - } - - galleryReaderWriter.On("FindByChecksum", existingGalleryChecksum).Return(&models.Gallery{ - ID: existingGalleryID, - }, nil).Once() - galleryReaderWriter.On("FindByChecksum", existingGalleryErr).Return(nil, errors.New("FindByChecksum error")).Once() - - err := i.PreImport() - assert.Nil(t, err) - assert.Equal(t, existingGalleryID, i.galleries[0].ID) - - i.Input.Galleries = []string{ - existingGalleryErr, - } - - err = i.PreImport() - assert.NotNil(t, err) - - galleryReaderWriter.AssertExpectations(t) -} - -func TestImporterPreImportWithMissingGallery(t *testing.T) { - galleryReaderWriter := &mocks.GalleryReaderWriter{} - - i := Importer{ - Path: path, - GalleryWriter: galleryReaderWriter, - Input: jsonschema.Image{ - Galleries: []string{ - missingGalleryChecksum, - }, - }, - MissingRefBehaviour: models.ImportMissingRefEnumFail, - } - - galleryReaderWriter.On("FindByChecksum", missingGalleryChecksum).Return(nil, nil).Times(3) - - err := i.PreImport() - assert.NotNil(t, err) - - i.MissingRefBehaviour = models.ImportMissingRefEnumIgnore - err = i.PreImport() - assert.Nil(t, err) - assert.Nil(t, i.galleries) - - i.MissingRefBehaviour = models.ImportMissingRefEnumCreate - err = i.PreImport() - assert.Nil(t, err) - assert.Nil(t, 
i.galleries) - - galleryReaderWriter.AssertExpectations(t) -} - func TestImporterPreImportWithPerformer(t *testing.T) { performerReaderWriter := &mocks.PerformerReaderWriter{} i := Importer{ PerformerWriter: performerReaderWriter, - Path: path, MissingRefBehaviour: models.ImportMissingRefEnumFail, Input: jsonschema.Image{ Performers: []string{ @@ -221,20 +127,20 @@ func TestImporterPreImportWithPerformer(t *testing.T) { }, } - performerReaderWriter.On("FindByNames", []string{existingPerformerName}, false).Return([]*models.Performer{ + performerReaderWriter.On("FindByNames", testCtx, []string{existingPerformerName}, false).Return([]*models.Performer{ { ID: existingPerformerID, Name: models.NullString(existingPerformerName), }, }, nil).Once() - performerReaderWriter.On("FindByNames", []string{existingPerformerErr}, false).Return(nil, errors.New("FindByNames error")).Once() + performerReaderWriter.On("FindByNames", testCtx, []string{existingPerformerErr}, false).Return(nil, errors.New("FindByNames error")).Once() - err := i.PreImport() + err := i.PreImport(testCtx) assert.Nil(t, err) - assert.Equal(t, existingPerformerID, i.performers[0].ID) + assert.Equal(t, []int{existingPerformerID}, i.image.PerformerIDs.List()) i.Input.Performers = []string{existingPerformerErr} - err = i.PreImport() + err = i.PreImport(testCtx) assert.NotNil(t, err) performerReaderWriter.AssertExpectations(t) @@ -244,7 +150,6 @@ func TestImporterPreImportWithMissingPerformer(t *testing.T) { performerReaderWriter := &mocks.PerformerReaderWriter{} i := Importer{ - Path: path, PerformerWriter: performerReaderWriter, Input: jsonschema.Image{ Performers: []string{ @@ -254,22 +159,22 @@ func TestImporterPreImportWithMissingPerformer(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumFail, } - performerReaderWriter.On("FindByNames", []string{missingPerformerName}, false).Return(nil, nil).Times(3) - performerReaderWriter.On("Create", mock.AnythingOfType("models.Performer")).Return(&models.Performer{ + performerReaderWriter.On("FindByNames", testCtx, []string{missingPerformerName}, false).Return(nil, nil).Times(3) + performerReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Performer")).Return(&models.Performer{ ID: existingPerformerID, }, nil) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumIgnore - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumCreate - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) - assert.Equal(t, existingPerformerID, i.performers[0].ID) + assert.Equal(t, []int{existingPerformerID}, i.image.PerformerIDs.List()) performerReaderWriter.AssertExpectations(t) } @@ -279,7 +184,6 @@ func TestImporterPreImportWithMissingPerformerCreateErr(t *testing.T) { i := Importer{ PerformerWriter: performerReaderWriter, - Path: path, Input: jsonschema.Image{ Performers: []string{ missingPerformerName, @@ -288,10 +192,10 @@ func TestImporterPreImportWithMissingPerformerCreateErr(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumCreate, } - performerReaderWriter.On("FindByNames", []string{missingPerformerName}, false).Return(nil, nil).Once() - performerReaderWriter.On("Create", mock.AnythingOfType("models.Performer")).Return(nil, errors.New("Create error")) + performerReaderWriter.On("FindByNames", testCtx, []string{missingPerformerName}, false).Return(nil, nil).Once() + performerReaderWriter.On("Create", testCtx, 
mock.AnythingOfType("models.Performer")).Return(nil, errors.New("Create error")) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) } @@ -300,7 +204,6 @@ func TestImporterPreImportWithTag(t *testing.T) { i := Importer{ TagWriter: tagReaderWriter, - Path: path, MissingRefBehaviour: models.ImportMissingRefEnumFail, Input: jsonschema.Image{ Tags: []string{ @@ -309,20 +212,20 @@ func TestImporterPreImportWithTag(t *testing.T) { }, } - tagReaderWriter.On("FindByNames", []string{existingTagName}, false).Return([]*models.Tag{ + tagReaderWriter.On("FindByNames", testCtx, []string{existingTagName}, false).Return([]*models.Tag{ { ID: existingTagID, Name: existingTagName, }, }, nil).Once() - tagReaderWriter.On("FindByNames", []string{existingTagErr}, false).Return(nil, errors.New("FindByNames error")).Once() + tagReaderWriter.On("FindByNames", testCtx, []string{existingTagErr}, false).Return(nil, errors.New("FindByNames error")).Once() - err := i.PreImport() + err := i.PreImport(testCtx) assert.Nil(t, err) - assert.Equal(t, existingTagID, i.tags[0].ID) + assert.Equal(t, []int{existingTagID}, i.image.TagIDs.List()) i.Input.Tags = []string{existingTagErr} - err = i.PreImport() + err = i.PreImport(testCtx) assert.NotNil(t, err) tagReaderWriter.AssertExpectations(t) @@ -332,7 +235,6 @@ func TestImporterPreImportWithMissingTag(t *testing.T) { tagReaderWriter := &mocks.TagReaderWriter{} i := Importer{ - Path: path, TagWriter: tagReaderWriter, Input: jsonschema.Image{ Tags: []string{ @@ -342,22 +244,22 @@ func TestImporterPreImportWithMissingTag(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumFail, } - tagReaderWriter.On("FindByNames", []string{missingTagName}, false).Return(nil, nil).Times(3) - tagReaderWriter.On("Create", mock.AnythingOfType("models.Tag")).Return(&models.Tag{ + tagReaderWriter.On("FindByNames", testCtx, []string{missingTagName}, false).Return(nil, nil).Times(3) + tagReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Tag")).Return(&models.Tag{ ID: existingTagID, }, nil) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumIgnore - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumCreate - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) - assert.Equal(t, existingTagID, i.tags[0].ID) + assert.Equal(t, []int{existingTagID}, i.image.TagIDs.List()) tagReaderWriter.AssertExpectations(t) } @@ -367,7 +269,6 @@ func TestImporterPreImportWithMissingTagCreateErr(t *testing.T) { i := Importer{ TagWriter: tagReaderWriter, - Path: path, Input: jsonschema.Image{ Tags: []string{ missingTagName, @@ -376,195 +277,9 @@ func TestImporterPreImportWithMissingTagCreateErr(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumCreate, } - tagReaderWriter.On("FindByNames", []string{missingTagName}, false).Return(nil, nil).Once() - tagReaderWriter.On("Create", mock.AnythingOfType("models.Tag")).Return(nil, errors.New("Create error")) + tagReaderWriter.On("FindByNames", testCtx, []string{missingTagName}, false).Return(nil, nil).Once() + tagReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Tag")).Return(nil, errors.New("Create error")) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) } - -func TestImporterPostImportUpdateGallery(t *testing.T) { - readerWriter := &mocks.ImageReaderWriter{} - - i := Importer{ - ReaderWriter: readerWriter, - galleries: 
[]*models.Gallery{ - { - ID: existingGalleryID, - }, - }, - } - - updateErr := errors.New("UpdateGalleries error") - - readerWriter.On("UpdateGalleries", imageID, []int{existingGalleryID}).Return(nil).Once() - readerWriter.On("UpdateGalleries", errGalleriesID, mock.AnythingOfType("[]int")).Return(updateErr).Once() - - err := i.PostImport(imageID) - assert.Nil(t, err) - - err = i.PostImport(errGalleriesID) - assert.NotNil(t, err) - - readerWriter.AssertExpectations(t) -} - -func TestImporterPostImportUpdatePerformers(t *testing.T) { - readerWriter := &mocks.ImageReaderWriter{} - - i := Importer{ - ReaderWriter: readerWriter, - performers: []*models.Performer{ - { - ID: existingPerformerID, - }, - }, - } - - updateErr := errors.New("UpdatePerformers error") - - readerWriter.On("UpdatePerformers", imageID, []int{existingPerformerID}).Return(nil).Once() - readerWriter.On("UpdatePerformers", errPerformersID, mock.AnythingOfType("[]int")).Return(updateErr).Once() - - err := i.PostImport(imageID) - assert.Nil(t, err) - - err = i.PostImport(errPerformersID) - assert.NotNil(t, err) - - readerWriter.AssertExpectations(t) -} - -func TestImporterPostImportUpdateTags(t *testing.T) { - readerWriter := &mocks.ImageReaderWriter{} - - i := Importer{ - ReaderWriter: readerWriter, - tags: []*models.Tag{ - { - ID: existingTagID, - }, - }, - } - - updateErr := errors.New("UpdateTags error") - - readerWriter.On("UpdateTags", imageID, []int{existingTagID}).Return(nil).Once() - readerWriter.On("UpdateTags", errTagsID, mock.AnythingOfType("[]int")).Return(updateErr).Once() - - err := i.PostImport(imageID) - assert.Nil(t, err) - - err = i.PostImport(errTagsID) - assert.NotNil(t, err) - - readerWriter.AssertExpectations(t) -} - -func TestImporterFindExistingID(t *testing.T) { - readerWriter := &mocks.ImageReaderWriter{} - - i := Importer{ - ReaderWriter: readerWriter, - Path: path, - Input: jsonschema.Image{ - Checksum: missingChecksum, - }, - } - - expectedErr := errors.New("FindBy* error") - readerWriter.On("FindByChecksum", missingChecksum).Return(nil, nil).Once() - readerWriter.On("FindByChecksum", checksum).Return(&models.Image{ - ID: existingImageID, - }, nil).Once() - readerWriter.On("FindByChecksum", errChecksum).Return(nil, expectedErr).Once() - - id, err := i.FindExistingID() - assert.Nil(t, id) - assert.Nil(t, err) - - i.Input.Checksum = checksum - id, err = i.FindExistingID() - assert.Equal(t, existingImageID, *id) - assert.Nil(t, err) - - i.Input.Checksum = errChecksum - id, err = i.FindExistingID() - assert.Nil(t, id) - assert.NotNil(t, err) - - readerWriter.AssertExpectations(t) -} - -func TestCreate(t *testing.T) { - readerWriter := &mocks.ImageReaderWriter{} - - image := models.Image{ - Title: models.NullString(title), - } - - imageErr := models.Image{ - Title: models.NullString(imageNameErr), - } - - i := Importer{ - ReaderWriter: readerWriter, - image: image, - } - - errCreate := errors.New("Create error") - readerWriter.On("Create", image).Return(&models.Image{ - ID: imageID, - }, nil).Once() - readerWriter.On("Create", imageErr).Return(nil, errCreate).Once() - - id, err := i.Create() - assert.Equal(t, imageID, *id) - assert.Nil(t, err) - assert.Equal(t, imageID, i.ID) - - i.image = imageErr - id, err = i.Create() - assert.Nil(t, id) - assert.NotNil(t, err) - - readerWriter.AssertExpectations(t) -} - -func TestUpdate(t *testing.T) { - readerWriter := &mocks.ImageReaderWriter{} - - image := models.Image{ - Title: models.NullString(title), - } - - imageErr := models.Image{ - Title: 
models.NullString(imageNameErr), - } - - i := Importer{ - ReaderWriter: readerWriter, - image: image, - } - - errUpdate := errors.New("Update error") - - // id needs to be set for the mock input - image.ID = imageID - readerWriter.On("UpdateFull", image).Return(nil, nil).Once() - - err := i.Update(imageID) - assert.Nil(t, err) - assert.Equal(t, imageID, i.ID) - - i.image = imageErr - - // need to set id separately - imageErr.ID = errImageID - readerWriter.On("UpdateFull", imageErr).Return(nil, errUpdate).Once() - - err = i.Update(errImageID) - assert.NotNil(t, err) - - readerWriter.AssertExpectations(t) -} diff --git a/pkg/image/query.go b/pkg/image/query.go index 058d0a842..36ed3a8c3 100644 --- a/pkg/image/query.go +++ b/pkg/image/query.go @@ -1,13 +1,18 @@ package image import ( + "context" "strconv" "github.com/stashapp/stash/pkg/models" ) type Queryer interface { - Query(options models.ImageQueryOptions) (*models.ImageQueryResult, error) + Query(ctx context.Context, options models.ImageQueryOptions) (*models.ImageQueryResult, error) +} + +type CountQueryer interface { + QueryCount(ctx context.Context, imageFilter *models.ImageFilterType, findFilter *models.FindFilterType) (int, error) } // QueryOptions returns a ImageQueryResult populated with the provided filters. @@ -22,13 +27,13 @@ func QueryOptions(imageFilter *models.ImageFilterType, findFilter *models.FindFi } // Query queries for images using the provided filters. -func Query(qb Queryer, imageFilter *models.ImageFilterType, findFilter *models.FindFilterType) ([]*models.Image, error) { - result, err := qb.Query(QueryOptions(imageFilter, findFilter, false)) +func Query(ctx context.Context, qb Queryer, imageFilter *models.ImageFilterType, findFilter *models.FindFilterType) ([]*models.Image, error) { + result, err := qb.Query(ctx, QueryOptions(imageFilter, findFilter, false)) if err != nil { return nil, err } - images, err := result.Resolve() + images, err := result.Resolve(ctx) if err != nil { return nil, err } @@ -36,7 +41,7 @@ func Query(qb Queryer, imageFilter *models.ImageFilterType, findFilter *models.F return images, nil } -func CountByPerformerID(r models.ImageReader, id int) (int, error) { +func CountByPerformerID(ctx context.Context, r CountQueryer, id int) (int, error) { filter := &models.ImageFilterType{ Performers: &models.MultiCriterionInput{ Value: []string{strconv.Itoa(id)}, @@ -44,10 +49,10 @@ func CountByPerformerID(r models.ImageReader, id int) (int, error) { }, } - return r.QueryCount(filter, nil) + return r.QueryCount(ctx, filter, nil) } -func CountByStudioID(r models.ImageReader, id int) (int, error) { +func CountByStudioID(ctx context.Context, r CountQueryer, id int) (int, error) { filter := &models.ImageFilterType{ Studios: &models.HierarchicalMultiCriterionInput{ Value: []string{strconv.Itoa(id)}, @@ -55,10 +60,10 @@ func CountByStudioID(r models.ImageReader, id int) (int, error) { }, } - return r.QueryCount(filter, nil) + return r.QueryCount(ctx, filter, nil) } -func CountByTagID(r models.ImageReader, id int) (int, error) { +func CountByTagID(ctx context.Context, r CountQueryer, id int) (int, error) { filter := &models.ImageFilterType{ Tags: &models.HierarchicalMultiCriterionInput{ Value: []string{strconv.Itoa(id)}, @@ -66,10 +71,10 @@ func CountByTagID(r models.ImageReader, id int) (int, error) { }, } - return r.QueryCount(filter, nil) + return r.QueryCount(ctx, filter, nil) } -func FindByGalleryID(r models.ImageReader, galleryID int, sortBy string, sortDir models.SortDirectionEnum) ([]*models.Image, 
error) { +func FindByGalleryID(ctx context.Context, r Queryer, galleryID int, sortBy string, sortDir models.SortDirectionEnum) ([]*models.Image, error) { perPage := -1 findFilter := models.FindFilterType{ @@ -84,7 +89,7 @@ func FindByGalleryID(r models.ImageReader, galleryID int, sortBy string, sortDir findFilter.Direction = &sortDir } - return Query(r, &models.ImageFilterType{ + return Query(ctx, r, &models.ImageFilterType{ Galleries: &models.MultiCriterionInput{ Value: []string{strconv.Itoa(galleryID)}, Modifier: models.CriterionModifierIncludes, diff --git a/pkg/image/scan.go b/pkg/image/scan.go index 8fa2f24a6..4f313ccc5 100644 --- a/pkg/image/scan.go +++ b/pkg/image/scan.go @@ -2,190 +2,226 @@ package image import ( "context" + "errors" "fmt" - "os" - "strings" + "path/filepath" "time" "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" - "github.com/stashapp/stash/pkg/models/paths" "github.com/stashapp/stash/pkg/plugin" - "github.com/stashapp/stash/pkg/utils" + "github.com/stashapp/stash/pkg/sliceutil/intslice" ) -const mutexType = "image" +var ( + ErrNotImageFile = errors.New("not an image file") +) -type Scanner struct { - file.Scanner - - StripFileExtension bool - - CaseSensitiveFs bool - TxnManager models.TransactionManager - Paths *paths.Paths - PluginCache *plugin.Cache - MutexManager *utils.MutexManager +type FinderCreatorUpdater interface { + FindByFileID(ctx context.Context, fileID file.ID) ([]*models.Image, error) + FindByFingerprints(ctx context.Context, fp []file.Fingerprint) ([]*models.Image, error) + Create(ctx context.Context, newImage *models.ImageCreateInput) error + AddFileID(ctx context.Context, id int, fileID file.ID) error + models.GalleryIDLoader + models.ImageFileLoader } -func FileScanner(hasher file.Hasher) file.Scanner { - return file.Scanner{ - Hasher: hasher, - CalculateMD5: true, +type GalleryFinderCreator interface { + FindByFileID(ctx context.Context, fileID file.ID) ([]*models.Gallery, error) + FindByFolderID(ctx context.Context, folderID file.FolderID) ([]*models.Gallery, error) + Create(ctx context.Context, newObject *models.Gallery, fileIDs []file.ID) error +} + +type ScanConfig interface { + GetCreateGalleriesFromFolders() bool + IsGenerateThumbnails() bool +} + +type ScanHandler struct { + CreatorUpdater FinderCreatorUpdater + GalleryFinder GalleryFinderCreator + + ThumbnailGenerator ThumbnailGenerator + + ScanConfig ScanConfig + + PluginCache *plugin.Cache +} + +func (h *ScanHandler) validate() error { + if h.CreatorUpdater == nil { + return errors.New("CreatorUpdater is required") } + if h.GalleryFinder == nil { + return errors.New("GalleryFinder is required") + } + if h.ScanConfig == nil { + return errors.New("ScanConfig is required") + } + + return nil } -func (scanner *Scanner) ScanExisting(ctx context.Context, existing file.FileBased, file file.SourceFile) (retImage *models.Image, err error) { - scanned, err := scanner.Scanner.ScanExisting(existing, file) +func (h *ScanHandler) Handle(ctx context.Context, f file.File) error { + if err := h.validate(); err != nil { + return err + } + + imageFile, ok := f.(*file.ImageFile) + if !ok { + return ErrNotImageFile + } + + // try to match the file to an image + existing, err := h.CreatorUpdater.FindByFileID(ctx, imageFile.ID) if err != nil { - return nil, err + return fmt.Errorf("finding existing image: %w", err) } - i := existing.(*models.Image) + if len(existing) == 0 { + // try also to match file by fingerprints + existing, err = 
h.CreatorUpdater.FindByFingerprints(ctx, imageFile.Fingerprints) + if err != nil { + return fmt.Errorf("finding existing image by fingerprints: %w", err) + } + } - i := existing.(*models.Image) + if len(existing) > 0 { + if err := h.associateExisting(ctx, existing, imageFile); err != nil { + return err + } + } else { + // create a new image + now := time.Now() + newImage := &models.Image{ + CreatedAt: now, + UpdatedAt: now, + GalleryIDs: models.NewRelatedIDs([]int{}), } - path := scanned.New.Path - oldChecksum := i.Checksum - changed := false - - if scanned.ContentsChanged() { - logger.Infof("%s has been updated: rescanning", path) - - // regenerate the file details as well - if err := SetFileDetails(i); err != nil { - return nil, err + // if the file is in a zip, then associate it with the gallery + if imageFile.ZipFileID != nil { + g, err := h.GalleryFinder.FindByFileID(ctx, *imageFile.ZipFileID) + if err != nil { + return fmt.Errorf("finding gallery for zip file id %d: %w", *imageFile.ZipFileID, err) + } - changed = true - } else if scanned.FileUpdated() { - logger.Infof("Updated image file %s", path) + for _, gg := range g { + newImage.GalleryIDs.Add(gg.ID) + } + } else if h.ScanConfig.GetCreateGalleriesFromFolders() { + if err := h.associateFolderBasedGallery(ctx, newImage, imageFile); err != nil { + return err + } + } - changed = true + logger.Infof("%s doesn't exist. Creating new image...", f.Base().Path) + + if err := h.CreatorUpdater.Create(ctx, &models.ImageCreateInput{ + Image: newImage, + FileIDs: []file.ID{imageFile.ID}, + }); err != nil { + return fmt.Errorf("creating new image: %w", err) + } + + h.PluginCache.ExecutePostHooks(ctx, newImage.ID, plugin.ImageCreatePost, nil, nil) + + existing = []*models.Image{newImage} } - if changed { - i.SetFile(*scanned.New) - i.UpdatedAt = models.SQLiteTimestamp{Timestamp: time.Now()} + if h.ScanConfig.IsGenerateThumbnails() { + for _, s := range existing { + if err := h.ThumbnailGenerator.GenerateThumbnail(ctx, s, imageFile); err != nil { + // just log if thumbnail generation fails.
We can try again on rescan + logger.Errorf("Error generating thumbnail for %s: %v", imageFile.Path, err) + } + } + } - // we are operating on a checksum now, so grab a mutex on the checksum - done := make(chan struct{}) - scanner.MutexManager.Claim(mutexType, scanned.New.Checksum, done) + return nil +} - if err := scanner.TxnManager.WithTxn(ctx, func(r models.Repository) error { - // free the mutex once transaction is complete - defer close(done) - var err error +func (h *ScanHandler) associateExisting(ctx context.Context, existing []*models.Image, f *file.ImageFile) error { + for _, i := range existing { + if err := i.LoadFiles(ctx, h.CreatorUpdater); err != nil { + return err + } - // ensure no clashes of hashes - if scanned.New.Checksum != "" && scanned.Old.Checksum != scanned.New.Checksum { - dupe, _ := r.Image().FindByChecksum(i.Checksum) - if dupe != nil { - return fmt.Errorf("MD5 for file %s is the same as that of %s", path, dupe.Path) + found := false + for _, sf := range i.Files.List() { + if sf.ID == f.Base().ID { + found = true + break + } + } + + if !found { + logger.Infof("Adding %s to image %s", f.Path, i.DisplayName()) + + // associate with folder-based gallery if applicable + if h.ScanConfig.GetCreateGalleriesFromFolders() { + if err := h.associateFolderBasedGallery(ctx, i, f); err != nil { + return err } } - retImage, err = r.Image().UpdateFull(*i) - return err - }); err != nil { - return nil, err - } - - // remove the old thumbnail if the checksum changed - we'll regenerate it - if oldChecksum != scanned.New.Checksum { - // remove cache dir of gallery - err = os.Remove(scanner.Paths.Generated.GetThumbnailPath(oldChecksum, models.DefaultGthumbWidth)) - if err != nil { - logger.Errorf("Error deleting thumbnail image: %s", err) + if err := h.CreatorUpdater.AddFileID(ctx, i.ID, f.ID); err != nil { + return fmt.Errorf("adding file to image: %w", err) } } - - scanner.PluginCache.ExecutePostHooks(ctx, retImage.ID, plugin.ImageUpdatePost, nil, nil) } - return + return nil } -func (scanner *Scanner) ScanNew(ctx context.Context, f file.SourceFile) (retImage *models.Image, err error) { - scanned, err := scanner.Scanner.ScanNew(f) +func (h *ScanHandler) getOrCreateFolderBasedGallery(ctx context.Context, f file.File) (*models.Gallery, error) { + // don't create folder-based galleries for files in zip file + if f.Base().ZipFileID != nil { + return nil, nil + } + + folderID := f.Base().ParentFolderID + g, err := h.GalleryFinder.FindByFolderID(ctx, folderID) if err != nil { - return nil, err + return nil, fmt.Errorf("finding folder based gallery: %w", err) } - path := f.Path() - checksum := scanned.Checksum - - // grab a mutex on the checksum - done := make(chan struct{}) - scanner.MutexManager.Claim(mutexType, checksum, done) - defer close(done) - - // check for image by checksum - var existingImage *models.Image - if err := scanner.TxnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - var err error - existingImage, err = r.Image().FindByChecksum(checksum) - return err - }); err != nil { - return nil, err + if len(g) > 0 { + gg := g[0] + return gg, nil } - pathDisplayName := file.ZipPathDisplayName(path) - - if existingImage != nil { - exists := FileExists(existingImage.Path) - if !scanner.CaseSensitiveFs { - // #1426 - if file exists but is a case-insensitive match for the - // original filename, then treat it as a move - if exists && strings.EqualFold(path, existingImage.Path) { - exists = false - } - } - - if exists { - logger.Infof("%s already exists. 
Duplicate of %s ", pathDisplayName, file.ZipPathDisplayName(existingImage.Path)) - return nil, nil - } else { - logger.Infof("%s already exists. Updating path...", pathDisplayName) - imagePartial := models.ImagePartial{ - ID: existingImage.ID, - Path: &path, - } - - if err := scanner.TxnManager.WithTxn(ctx, func(r models.Repository) error { - retImage, err = r.Image().Update(imagePartial) - return err - }); err != nil { - return nil, err - } - - scanner.PluginCache.ExecutePostHooks(ctx, existingImage.ID, plugin.ImageUpdatePost, nil, nil) - } - } else { - logger.Infof("%s doesn't exist. Creating new item...", pathDisplayName) - currentTime := time.Now() - newImage := models.Image{ - CreatedAt: models.SQLiteTimestamp{Timestamp: currentTime}, - UpdatedAt: models.SQLiteTimestamp{Timestamp: currentTime}, - } - newImage.SetFile(*scanned) - newImage.Title.String = GetFilename(&newImage, scanner.StripFileExtension) - newImage.Title.Valid = true - - if err := SetFileDetails(&newImage); err != nil { - logger.Error(err.Error()) - return nil, err - } - - if err := scanner.TxnManager.WithTxn(ctx, func(r models.Repository) error { - var err error - retImage, err = r.Image().Create(newImage) - return err - }); err != nil { - return nil, err - } - - scanner.PluginCache.ExecutePostHooks(ctx, retImage.ID, plugin.ImageCreatePost, nil, nil) + // create a new folder-based gallery + now := time.Now() + newGallery := &models.Gallery{ + FolderID: &folderID, + CreatedAt: now, + UpdatedAt: now, } - return + logger.Infof("Creating folder-based gallery for %s", filepath.Dir(f.Base().Path)) + if err := h.GalleryFinder.Create(ctx, newGallery, nil); err != nil { + return nil, fmt.Errorf("creating folder based gallery: %w", err) + } + + return newGallery, nil +} + +func (h *ScanHandler) associateFolderBasedGallery(ctx context.Context, newImage *models.Image, f file.File) error { + g, err := h.getOrCreateFolderBasedGallery(ctx, f) + if err != nil { + return err + } + + if err := newImage.LoadGalleryIDs(ctx, h.CreatorUpdater); err != nil { + return err + } + + if g != nil && !intslice.IntInclude(newImage.GalleryIDs.List(), g.ID) { + newImage.GalleryIDs.Add(g.ID) + logger.Infof("Adding %s to folder-based gallery %s", f.Base().Path, g.Path) + } + + return nil } diff --git a/pkg/image/service.go b/pkg/image/service.go new file mode 100644 index 000000000..667317735 --- /dev/null +++ b/pkg/image/service.go @@ -0,0 +1,24 @@ +package image + +import ( + "context" + + "github.com/stashapp/stash/pkg/file" + "github.com/stashapp/stash/pkg/models" +) + +type FinderByFile interface { + FindByFileID(ctx context.Context, fileID file.ID) ([]*models.Image, error) + FindByZipFileID(ctx context.Context, zipFileID file.ID) ([]*models.Image, error) +} + +type Repository interface { + FinderByFile + Destroyer + models.ImageFileLoader +} + +type Service struct { + File file.Store + Repository Repository +} diff --git a/pkg/image/thumbnail.go b/pkg/image/thumbnail.go index 62c84cff6..9fc720a76 100644 --- a/pkg/image/thumbnail.go +++ b/pkg/image/thumbnail.go @@ -5,13 +5,13 @@ import ( "context" "errors" "fmt" - "image" "os/exec" "runtime" "sync" "github.com/stashapp/stash/pkg/ffmpeg" "github.com/stashapp/stash/pkg/ffmpeg/transcoder" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/models" ) @@ -27,6 +27,10 @@ var ( ErrNotSupportedForThumbnail = errors.New("unsupported image format for thumbnail") ) +type ThumbnailGenerator interface { + GenerateThumbnail(ctx context.Context, i *models.Image, f *file.ImageFile) error +} + 
type ThumbnailEncoder struct { ffmpeg ffmpeg.FFMpeg vips *vipsEncoder @@ -57,11 +61,12 @@ func NewThumbnailEncoder(ffmpegEncoder ffmpeg.FFMpeg) ThumbnailEncoder { // the provided max size. It resizes based on the largest X/Y direction. // It returns nil and an error if an error occurs reading, decoding or encoding // the image, or if the image is not suitable for thumbnails. -func (e *ThumbnailEncoder) GetThumbnail(img *models.Image, maxSize int) ([]byte, error) { - reader, err := openSourceImage(img.Path) +func (e *ThumbnailEncoder) GetThumbnail(f *file.ImageFile, maxSize int) ([]byte, error) { + reader, err := f.Open(&file.OsFS{}) if err != nil { return nil, err } + defer reader.Close() buf := new(bytes.Buffer) if _, err := buf.ReadFrom(reader); err != nil { @@ -70,13 +75,8 @@ func (e *ThumbnailEncoder) GetThumbnail(img *models.Image, maxSize int) ([]byte, data := buf.Bytes() - // use NewBufferString to copy the buffer, rather than reuse it - _, format, err := image.DecodeConfig(bytes.NewBufferString(string(data))) - if err != nil { - return nil, err - } - - animated := format == formatGif + format := f.Format + animated := f.Format == formatGif // #2266 - if image is webp, then determine if it is animated if format == formatWebP { diff --git a/pkg/image/update.go b/pkg/image/update.go index f8c43f965..3a173b7ad 100644 --- a/pkg/image/update.go +++ b/pkg/image/update.go @@ -1,53 +1,32 @@ package image import ( + "context" + "github.com/stashapp/stash/pkg/models" - "github.com/stashapp/stash/pkg/sliceutil/intslice" ) -func UpdateFileModTime(qb models.ImageWriter, id int, modTime models.NullSQLiteTimestamp) (*models.Image, error) { - return qb.Update(models.ImagePartial{ - ID: id, - FileModTime: &modTime, +type PartialUpdater interface { + UpdatePartial(ctx context.Context, id int, partial models.ImagePartial) (*models.Image, error) +} + +func AddPerformer(ctx context.Context, qb PartialUpdater, i *models.Image, performerID int) error { + _, err := qb.UpdatePartial(ctx, i.ID, models.ImagePartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: []int{performerID}, + Mode: models.RelationshipUpdateModeAdd, + }, }) + + return err } -func AddPerformer(qb models.ImageReaderWriter, id int, performerID int) (bool, error) { - performerIDs, err := qb.GetPerformerIDs(id) - if err != nil { - return false, err - } - - oldLen := len(performerIDs) - performerIDs = intslice.IntAppendUnique(performerIDs, performerID) - - if len(performerIDs) != oldLen { - if err := qb.UpdatePerformers(id, performerIDs); err != nil { - return false, err - } - - return true, nil - } - - return false, nil -} - -func AddTag(qb models.ImageReaderWriter, id int, tagID int) (bool, error) { - tagIDs, err := qb.GetTagIDs(id) - if err != nil { - return false, err - } - - oldLen := len(tagIDs) - tagIDs = intslice.IntAppendUnique(tagIDs, tagID) - - if len(tagIDs) != oldLen { - if err := qb.UpdateTags(id, tagIDs); err != nil { - return false, err - } - - return true, nil - } - - return false, nil +func AddTag(ctx context.Context, qb PartialUpdater, i *models.Image, tagID int) error { + _, err := qb.UpdatePartial(ctx, i.ID, models.ImagePartial{ + TagIDs: &models.UpdateIDs{ + IDs: []int{tagID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }) + return err } diff --git a/pkg/job/job.go b/pkg/job/job.go index 09188eb9d..b3e8685f6 100644 --- a/pkg/job/job.go +++ b/pkg/job/job.go @@ -40,6 +40,8 @@ const ( StatusFinished Status = "FINISHED" // StatusCancelled means that the job was cancelled and is now stopped. 
StatusCancelled Status = "CANCELLED" + // StatusFailed means that the job failed. + StatusFailed Status = "FAILED" ) // Job represents the status of a queued or running job. diff --git a/pkg/job/manager.go b/pkg/job/manager.go index 1af604f7d..ce5fd4f9d 100644 --- a/pkg/job/manager.go +++ b/pkg/job/manager.go @@ -2,8 +2,11 @@ package job import ( "context" + "runtime/debug" "sync" "time" + + "github.com/stashapp/stash/pkg/logger" ) const maxGraveyardSize = 10 @@ -179,27 +182,39 @@ func (m *Manager) dispatch(ctx context.Context, j *Job) (done chan struct{}) { j.cancelFunc = cancelFunc done = make(chan struct{}) - go func() { - progress := m.newProgress(j) - j.exec.Execute(ctx, progress) - - m.onJobFinish(j) - - close(done) - }() + go m.executeJob(ctx, j, done) m.notifyJobUpdate(j) return } +func (m *Manager) executeJob(ctx context.Context, j *Job, done chan struct{}) { + defer close(done) + defer m.onJobFinish(j) + defer func() { + if p := recover(); p != nil { + // a panic occurred, log and mark the job as failed + logger.Errorf("panic in job %d - %s: %v", j.ID, j.Description, p) + logger.Error(string(debug.Stack())) + + m.mutex.Lock() + defer m.mutex.Unlock() + j.Status = StatusFailed + } + }() + + progress := m.newProgress(j) + j.exec.Execute(ctx, progress) +} + func (m *Manager) onJobFinish(job *Job) { m.mutex.Lock() defer m.mutex.Unlock() if job.Status == StatusStopping { job.Status = StatusCancelled - } else { + } else if job.Status != StatusFailed { job.Status = StatusFinished } t := time.Now() diff --git a/pkg/job/progress.go b/pkg/job/progress.go index 3bd6c3f08..51216331d 100644 --- a/pkg/job/progress.go +++ b/pkg/job/progress.go @@ -9,6 +9,7 @@ const ProgressIndefinite float64 = -1 // Progress is used by JobExec to communicate updates to the job's progress to // the JobManager. type Progress struct { + defined bool processed int total int percent float64 @@ -36,17 +37,38 @@ func (p *Progress) Indefinite() { p.mutex.Lock() defer p.mutex.Unlock() + p.defined = false p.total = 0 p.calculatePercent() } -// SetTotal sets the total number of work units. This is used to calculate the -// progress percentage. +// Definite notifies that the total is known. +func (p *Progress) Definite() { + p.mutex.Lock() + defer p.mutex.Unlock() + + p.defined = true + p.calculatePercent() +} + +// SetTotal sets the total number of work units and sets definite to true. +// This is used to calculate the progress percentage. func (p *Progress) SetTotal(total int) { p.mutex.Lock() defer p.mutex.Unlock() p.total = total + p.defined = true + p.calculatePercent() +} + +// AddTotal adds to the total number of work units. This is used to calculate the +// progress percentage. 
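// The definite/indefinite split is easiest to see in a job that discovers its
// workload incrementally. A hedged sketch (scanJob, countFiles, and process
// are illustrative; assumes the executor receives a *Progress as above):
func (j *scanJob) Execute(ctx context.Context, p *Progress) {
	p.Indefinite() // report ProgressIndefinite while the total is unknown
	for _, dir := range j.dirs {
		p.AddTotal(countFiles(dir)) // grow the total without marking it final
	}
	p.Definite() // the total is now complete; percent becomes meaningful
	for _, f := range j.files {
		process(ctx, f)
		p.Increment()
	}
}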
diff --git a/pkg/job/progress.go b/pkg/job/progress.go
index 3bd6c3f08..51216331d 100644
--- a/pkg/job/progress.go
+++ b/pkg/job/progress.go
@@ -9,6 +9,7 @@ const ProgressIndefinite float64 = -1
 // Progress is used by JobExec to communicate updates to the job's progress to
 // the JobManager.
 type Progress struct {
+	defined   bool
 	processed int
 	total     int
 	percent   float64
@@ -36,17 +37,38 @@ func (p *Progress) Indefinite() {
 	p.mutex.Lock()
 	defer p.mutex.Unlock()
 
+	p.defined = false
 	p.total = 0
 	p.calculatePercent()
 }
 
-// SetTotal sets the total number of work units. This is used to calculate the
-// progress percentage.
+// Definite notifies that the total is known.
+func (p *Progress) Definite() {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	p.defined = true
+	p.calculatePercent()
+}
+
+// SetTotal sets the total number of work units and sets definite to true.
+// This is used to calculate the progress percentage.
 func (p *Progress) SetTotal(total int) {
 	p.mutex.Lock()
 	defer p.mutex.Unlock()
 
 	p.total = total
+	p.defined = true
+	p.calculatePercent()
+}
+
+// AddTotal adds to the total number of work units. This is used to calculate the
+// progress percentage.
+func (p *Progress) AddTotal(total int) {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	p.total += total
 	p.calculatePercent()
 }
 
@@ -62,7 +84,7 @@ func (p *Progress) SetProcessed(processed int) {
 
 func (p *Progress) calculatePercent() {
 	switch {
-	case p.total <= 0:
+	case !p.defined || p.total <= 0:
 		p.percent = ProgressIndefinite
 	case p.processed < 0:
 		p.percent = 0
@@ -99,7 +121,7 @@ func (p *Progress) Increment() {
 	p.mutex.Lock()
 	defer p.mutex.Unlock()
 
-	if p.total <= 0 || p.processed < p.total {
+	if !p.defined || p.total <= 0 || p.processed < p.total {
 		p.processed++
 		p.calculatePercent()
 	}
@@ -112,7 +134,7 @@ func (p *Progress) AddProcessed(v int) {
 	defer p.mutex.Unlock()
 
 	newVal := v
-	if newVal > p.total {
+	if p.defined && p.total > 0 && newVal > p.total {
 		newVal = p.total
 	}
 
@@ -124,7 +146,7 @@ func (p *Progress) addTask(t *task) {
 	p.mutex.Lock()
 	defer p.mutex.Unlock()
 
-	p.currentTasks = append(p.currentTasks, t)
+	p.currentTasks = append([]*task{t}, p.currentTasks...)
 
 	p.updated()
 }
diff --git a/pkg/job/progress_test.go b/pkg/job/progress_test.go
index 5bca05ae4..716fdf9e1 100644
--- a/pkg/job/progress_test.go
+++ b/pkg/job/progress_test.go
@@ -14,6 +14,7 @@ func createProgress(m *Manager, j *Job) Progress {
 			job: j,
 		},
 		total:     100,
+		defined:   true,
 		processed: 10,
 		percent:   10,
 	}
diff --git a/pkg/job/task.go b/pkg/job/task.go
new file mode 100644
index 000000000..fa0891e6f
--- /dev/null
+++ b/pkg/job/task.go
@@ -0,0 +1,67 @@
+package job
+
+import (
+	"context"
+
+	"github.com/remeh/sizedwaitgroup"
+)
+
+type taskExec struct {
+	task
+	fn func(ctx context.Context)
+}
+
+type TaskQueue struct {
+	p     *Progress
+	wg    sizedwaitgroup.SizedWaitGroup
+	tasks chan taskExec
+	done  chan struct{}
+}
+
+func NewTaskQueue(ctx context.Context, p *Progress, queueSize int, processes int) *TaskQueue {
+	ret := &TaskQueue{
+		p:     p,
+		wg:    sizedwaitgroup.New(processes),
+		tasks: make(chan taskExec, queueSize),
+		done:  make(chan struct{}),
+	}
+
+	go ret.executer(ctx)
+
+	return ret
+}
+
+func (tq *TaskQueue) Add(description string, fn func(ctx context.Context)) {
+	tq.tasks <- taskExec{
+		task: task{
+			description: description,
+		},
+		fn: fn,
+	}
+}
+
+func (tq *TaskQueue) Close() {
+	close(tq.tasks)
+	// wait for all tasks to finish
+	<-tq.done
+}
+
+func (tq *TaskQueue) executer(ctx context.Context) {
+	defer close(tq.done)
+	defer tq.wg.Wait()
+
+	for task := range tq.tasks {
+		if IsCancelled(ctx) {
+			return
+		}
+
+		tt := task
+
+		tq.wg.Add()
+		go func() {
+			defer tq.wg.Done()
+			tq.p.ExecuteTask(tt.description, func() {
+				tt.fn(ctx)
+			})
+		}()
+	}
+}
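A sketch of the intended `TaskQueue` usage, assuming it runs inside an executor's `Execute` method where `ctx` and `progress` are supplied by the manager (the item type and work body are placeholders):

```go
// runBatch fans items out over 4 workers with a buffer of 10 queued tasks.
func runBatch(ctx context.Context, progress *job.Progress, items []string) {
	progress.SetTotal(len(items)) // marks the progress as definite
	queue := job.NewTaskQueue(ctx, progress, 10, 4)

	for _, item := range items {
		item := item // capture loop variable for the closure
		queue.Add("processing "+item, func(ctx context.Context) {
			// ... do the per-item work ...
			progress.Increment()
		})
	}

	// Close stops accepting tasks and blocks until queued tasks have drained
	queue.Close()
}
```

Note that `Add` blocks once the task channel buffer is full, which gives the queue natural backpressure against the producer.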
diff --git a/pkg/logger/basic.go b/pkg/logger/basic.go
new file mode 100644
index 000000000..d872777d5
--- /dev/null
+++ b/pkg/logger/basic.go
@@ -0,0 +1,74 @@
+package logger
+
+import (
+	"fmt"
+	"os"
+)
+
+// BasicLogger logs all messages to stdout
+type BasicLogger struct{}
+
+var _ LoggerImpl = &BasicLogger{}
+
+func (log *BasicLogger) print(level string, args ...interface{}) {
+	fmt.Print(level + ": ")
+	fmt.Println(args...)
+}
+
+func (log *BasicLogger) printf(level string, format string, args ...interface{}) {
+	fmt.Printf(level+": "+format+"\n", args...)
+}
+
+func (log *BasicLogger) Progressf(format string, args ...interface{}) {
+	log.printf("Progress", format, args...)
+}
+
+func (log *BasicLogger) Trace(args ...interface{}) {
+	log.print("Trace", args...)
+}
+
+func (log *BasicLogger) Tracef(format string, args ...interface{}) {
+	log.printf("Trace", format, args...)
+}
+
+func (log *BasicLogger) Debug(args ...interface{}) {
+	log.print("Debug", args...)
+}
+
+func (log *BasicLogger) Debugf(format string, args ...interface{}) {
+	log.printf("Debug", format, args...)
+}
+
+func (log *BasicLogger) Info(args ...interface{}) {
+	log.print("Info", args...)
+}
+
+func (log *BasicLogger) Infof(format string, args ...interface{}) {
+	log.printf("Info", format, args...)
+}
+
+func (log *BasicLogger) Warn(args ...interface{}) {
+	log.print("Warn", args...)
+}
+
+func (log *BasicLogger) Warnf(format string, args ...interface{}) {
+	log.printf("Warn", format, args...)
+}
+
+func (log *BasicLogger) Error(args ...interface{}) {
+	log.print("Error", args...)
+}
+
+func (log *BasicLogger) Errorf(format string, args ...interface{}) {
+	log.printf("Error", format, args...)
+}
+
+func (log *BasicLogger) Fatal(args ...interface{}) {
+	log.print("Fatal", args...)
+	os.Exit(1)
+}
+
+func (log *BasicLogger) Fatalf(format string, args ...interface{}) {
+	log.printf("Fatal", format, args...)
+	os.Exit(1)
+}
diff --git a/pkg/match/cache.go b/pkg/match/cache.go
index 6d7238809..06237c7f6 100644
--- a/pkg/match/cache.go
+++ b/pkg/match/cache.go
@@ -1,6 +1,10 @@
 package match
 
-import "github.com/stashapp/stash/pkg/models"
+import (
+	"context"
+
+	"github.com/stashapp/stash/pkg/models"
+)
 
 const singleFirstCharacterRegex = `^[\p{L}][.\-_ ]`
 
@@ -16,14 +20,14 @@ type Cache struct {
 // against. This means that performers with single-letter words in their names could potentially
 // be missed.
 // This query is expensive, so it's queried once and cached, if the cache if provided.
-func getSingleLetterPerformers(c *Cache, reader models.PerformerReader) ([]*models.Performer, error) {
+func getSingleLetterPerformers(ctx context.Context, c *Cache, reader PerformerAutoTagQueryer) ([]*models.Performer, error) {
 	if c == nil {
 		c = &Cache{}
 	}
 
 	if c.singleCharPerformers == nil {
 		pp := -1
-		performers, _, err := reader.Query(&models.PerformerFilterType{
+		performers, _, err := reader.Query(ctx, &models.PerformerFilterType{
 			Name: &models.StringCriterionInput{
 				Value:    singleFirstCharacterRegex,
 				Modifier: models.CriterionModifierMatchesRegex,
@@ -49,14 +53,14 @@ func getSingleLetterPerformers(c *Cache, reader models.PerformerReader) ([]*mode
 
 // getSingleLetterStudios returns all studios with names that start with single character words.
 // See getSingleLetterPerformers for details.
-func getSingleLetterStudios(c *Cache, reader models.StudioReader) ([]*models.Studio, error) {
+func getSingleLetterStudios(ctx context.Context, c *Cache, reader StudioAutoTagQueryer) ([]*models.Studio, error) {
 	if c == nil {
 		c = &Cache{}
 	}
 
 	if c.singleCharStudios == nil {
 		pp := -1
-		studios, _, err := reader.Query(&models.StudioFilterType{
+		studios, _, err := reader.Query(ctx, &models.StudioFilterType{
 			Name: &models.StringCriterionInput{
 				Value:    singleFirstCharacterRegex,
 				Modifier: models.CriterionModifierMatchesRegex,
@@ -82,14 +86,14 @@ func getSingleLetterStudios(c *Cache, reader models.StudioReader) ([]*models.Stu
 
 // getSingleLetterTags returns all tags with names that start with single character words.
 // See getSingleLetterPerformers for details.
-func getSingleLetterTags(c *Cache, reader models.TagReader) ([]*models.Tag, error) {
+func getSingleLetterTags(ctx context.Context, c *Cache, reader TagAutoTagQueryer) ([]*models.Tag, error) {
 	if c == nil {
 		c = &Cache{}
 	}
 
 	if c.singleCharTags == nil {
 		pp := -1
-		tags, _, err := reader.Query(&models.TagFilterType{
+		tags, _, err := reader.Query(ctx, &models.TagFilterType{
 			Name: &models.StringCriterionInput{
 				Value:    singleFirstCharacterRegex,
 				Modifier: models.CriterionModifierMatchesRegex,
diff --git a/pkg/match/path.go b/pkg/match/path.go
index 4f20423dd..e77fc2e59 100644
--- a/pkg/match/path.go
+++ b/pkg/match/path.go
@@ -1,6 +1,7 @@
 package match
 
 import (
+	"context"
 	"fmt"
 	"path/filepath"
 	"regexp"
@@ -12,6 +13,8 @@ import (
 	"github.com/stashapp/stash/pkg/models"
 	"github.com/stashapp/stash/pkg/scene"
 	"github.com/stashapp/stash/pkg/sliceutil/stringslice"
+	"github.com/stashapp/stash/pkg/studio"
+	"github.com/stashapp/stash/pkg/tag"
 )
 
 const (
@@ -24,6 +27,23 @@ const (
 
 var separatorRE = regexp.MustCompile(separatorPattern)
 
+type PerformerAutoTagQueryer interface {
+	Query(ctx context.Context, performerFilter *models.PerformerFilterType, findFilter *models.FindFilterType) ([]*models.Performer, int, error)
+	QueryForAutoTag(ctx context.Context, words []string) ([]*models.Performer, error)
+}
+
+type StudioAutoTagQueryer interface {
+	QueryForAutoTag(ctx context.Context, words []string) ([]*models.Studio, error)
+	studio.Queryer
+	GetAliases(ctx context.Context, studioID int) ([]string, error)
+}
+
+type TagAutoTagQueryer interface {
+	QueryForAutoTag(ctx context.Context, words []string) ([]*models.Tag, error)
+	tag.Queryer
+	GetAliases(ctx context.Context, tagID int) ([]string, error)
+}
+
 func getPathQueryRegex(name string) string {
 	// escape specific regex characters
 	name = regexp.QuoteMeta(name)
@@ -124,13 +144,13 @@ func regexpMatchesPath(r *regexp.Regexp, path string) int {
 	return found[len(found)-1][0]
 }
 
-func getPerformers(words []string, performerReader models.PerformerReader, cache *Cache) ([]*models.Performer, error) {
-	performers, err := performerReader.QueryForAutoTag(words)
+func getPerformers(ctx context.Context, words []string, performerReader PerformerAutoTagQueryer, cache *Cache) ([]*models.Performer, error) {
+	performers, err := performerReader.QueryForAutoTag(ctx, words)
 	if err != nil {
 		return nil, err
 	}
 
-	swPerformers, err := getSingleLetterPerformers(cache, performerReader)
+	swPerformers, err := getSingleLetterPerformers(ctx, cache, performerReader)
 	if err != nil {
 		return nil, err
 	}
@@ -138,10 +158,10 @@ func getPerformers(words []string, performerReader models.PerformerReader, cache
 	return append(performers, swPerformers...), nil
 }
 
-func PathToPerformers(path string, reader models.PerformerReader, cache *Cache, trimExt bool) ([]*models.Performer, error) {
+func PathToPerformers(ctx context.Context, path string, reader PerformerAutoTagQueryer, cache *Cache, trimExt bool) ([]*models.Performer, error) {
 	words := getPathWords(path, trimExt)
 
-	performers, err := getPerformers(words, reader, cache)
+	performers, err := getPerformers(ctx, words, reader, cache)
 	if err != nil {
 		return nil, err
 	}
@@ -157,13 +177,13 @@ func PathToPerformers(path string, reader models.PerformerReader, cache *Cache,
 	return ret, nil
 }
 
-func getStudios(words []string, reader models.StudioReader, cache *Cache) ([]*models.Studio, error) {
-	studios, err := reader.QueryForAutoTag(words)
+func getStudios(ctx context.Context, words []string, reader StudioAutoTagQueryer, cache *Cache) ([]*models.Studio, error) {
+	studios, err := reader.QueryForAutoTag(ctx, words)
 	if err != nil {
 		return nil, err
 	}
 
-	swStudios, err := getSingleLetterStudios(cache, reader)
+	swStudios, err := getSingleLetterStudios(ctx, cache, reader)
 	if err != nil {
 		return nil, err
 	}
@@ -174,9 +194,9 @@ func getStudios(words []string, reader models.StudioReader, cache *Cache) ([]*mo
 // PathToStudio returns the Studio that matches the given path.
 // Where multiple matching studios are found, the one that matches the latest
 // position in the path is returned.
-func PathToStudio(path string, reader models.StudioReader, cache *Cache, trimExt bool) (*models.Studio, error) {
+func PathToStudio(ctx context.Context, path string, reader StudioAutoTagQueryer, cache *Cache, trimExt bool) (*models.Studio, error) {
 	words := getPathWords(path, trimExt)
-	candidates, err := getStudios(words, reader, cache)
+	candidates, err := getStudios(ctx, words, reader, cache)
 
 	if err != nil {
 		return nil, err
@@ -191,7 +211,7 @@ func PathToStudio(path string, reader models.StudioReader, cache *Cache, trimExt
 			index = matchIndex
 		}
 
-		aliases, err := reader.GetAliases(c.ID)
+		aliases, err := reader.GetAliases(ctx, c.ID)
 		if err != nil {
 			return nil, err
 		}
@@ -208,13 +228,13 @@ func PathToStudio(path string, reader models.StudioReader, cache *Cache, trimExt
 	return ret, nil
 }
 
-func getTags(words []string, reader models.TagReader, cache *Cache) ([]*models.Tag, error) {
-	tags, err := reader.QueryForAutoTag(words)
+func getTags(ctx context.Context, words []string, reader TagAutoTagQueryer, cache *Cache) ([]*models.Tag, error) {
+	tags, err := reader.QueryForAutoTag(ctx, words)
 	if err != nil {
 		return nil, err
 	}
 
-	swTags, err := getSingleLetterTags(cache, reader)
+	swTags, err := getSingleLetterTags(ctx, cache, reader)
 	if err != nil {
 		return nil, err
 	}
@@ -222,9 +242,9 @@ func getTags(words []string, reader models.TagReader, cache *Cache) ([]*models.T
 	return append(tags, swTags...), nil
 }
 
-func PathToTags(path string, reader models.TagReader, cache *Cache, trimExt bool) ([]*models.Tag, error) {
+func PathToTags(ctx context.Context, path string, reader TagAutoTagQueryer, cache *Cache, trimExt bool) ([]*models.Tag, error) {
 	words := getPathWords(path, trimExt)
-	tags, err := getTags(words, reader, cache)
+	tags, err := getTags(ctx, words, reader, cache)
 
 	if err != nil {
 		return nil, err
@@ -238,7 +258,7 @@ func PathToTags(path string, reader models.TagReader, cache *Cache, trimExt bool
 	}
 
 	if !matches {
-		aliases, err := reader.GetAliases(t.ID)
+		aliases, err := reader.GetAliases(ctx, t.ID)
 		if err != nil {
 			return nil, err
 		}
@@ -258,7 +278,7 @@ func PathToTags(path string, reader models.TagReader, cache *Cache, trimExt bool
 	return ret, nil
 }
 
-func PathToScenes(name string, paths []string, sceneReader models.SceneReader) ([]*models.Scene, error) {
+func PathToScenes(ctx context.Context, name string, paths []string, sceneReader scene.Queryer) ([]*models.Scene, error) {
 	regex := getPathQueryRegex(name)
 	organized := false
 	filter := models.SceneFilterType{
@@ -272,7 +292,7 @@ func PathToScenes(name string, paths []string, sceneReader models.SceneReader) (
 	filter.And = scene.PathsFilter(paths)
 
 	pp := models.PerPageAll
-	scenes, err := scene.Query(sceneReader, &filter, &models.FindFilterType{
+	scenes, err := scene.Query(ctx, sceneReader, &filter, &models.FindFilterType{
 		PerPage: &pp,
 	})
 
@@ -295,7 +315,7 @@ func PathToScenes(name string, paths []string, sceneReader models.SceneReader) (
 	return ret, nil
 }
 
-func PathToImages(name string, paths []string, imageReader models.ImageReader) ([]*models.Image, error) {
+func PathToImages(ctx context.Context, name string, paths []string, imageReader image.Queryer) ([]*models.Image, error) {
 	regex := getPathQueryRegex(name)
 	organized := false
 	filter := models.ImageFilterType{
@@ -309,7 +329,7 @@ func PathToImages(name string, paths []string, imageReader models.ImageReader) (
 	filter.And = image.PathsFilter(paths)
 
 	pp := models.PerPageAll
-	images, err := image.Query(imageReader, &filter, &models.FindFilterType{
+	images, err := image.Query(ctx, imageReader, &filter, &models.FindFilterType{
 		PerPage: &pp,
 	})
 
@@ -332,7 +352,7 @@ func PathToImages(name string, paths []string, imageReader models.ImageReader) (
 	return ret, nil
 }
 
-func PathToGalleries(name string, paths []string, galleryReader models.GalleryReader) ([]*models.Gallery, error) {
+func PathToGalleries(ctx context.Context, name string, paths []string, galleryReader gallery.Queryer) ([]*models.Gallery, error) {
 	regex := getPathQueryRegex(name)
 	organized := false
 	filter := models.GalleryFilterType{
@@ -346,7 +366,7 @@ func PathToGalleries(name string, paths []string, galleryReader models.GalleryRe
 	filter.And = gallery.PathsFilter(paths)
 
 	pp := models.PerPageAll
-	gallerys, _, err := galleryReader.Query(&filter, &models.FindFilterType{
+	gallerys, _, err := galleryReader.Query(ctx, &filter, &models.FindFilterType{
 		PerPage: &pp,
 	})
 
@@ -361,7 +381,8 @@ func PathToGalleries(name string, paths []string, galleryReader models.GalleryRe
 	r := nameToRegexp(name, useUnicode)
 	for _, p := range gallerys {
-		if regexpMatchesPath(r, p.Path.String) != -1 {
+		path := p.Path
+		if path != "" && regexpMatchesPath(r, path) != -1 {
 			ret = append(ret, p)
 		}
 	}
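The path-matching entry points now take a `context.Context` and narrow query interfaces instead of the broad `models.*Reader` types. A minimal sketch of the new call shape; `performers` and `studios` are hypothetical values satisfying the interfaces declared above:

```go
// autoTagOne matches performers and a studio against a single file path.
func autoTagOne(ctx context.Context, path string,
	performers match.PerformerAutoTagQueryer, studios match.StudioAutoTagQueryer) error {
	// a shared Cache avoids re-running the expensive single-letter query per path
	cache := &match.Cache{}

	found, err := match.PathToPerformers(ctx, path, performers, cache, true)
	if err != nil {
		return err
	}
	_ = found // tag the scene/image/gallery with these performers

	st, err := match.PathToStudio(ctx, path, studios, cache, true)
	if err != nil {
		return err
	}
	_ = st // nil means no studio matched
	return nil
}
```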
diff --git a/pkg/match/scraped.go b/pkg/match/scraped.go
index 1e9de81e1..d1182a329 100644
--- a/pkg/match/scraped.go
+++ b/pkg/match/scraped.go
@@ -1,6 +1,7 @@
 package match
 
 import (
+	"context"
 	"strconv"
 
 	"github.com/stashapp/stash/pkg/models"
@@ -8,16 +9,25 @@ import (
 	"github.com/stashapp/stash/pkg/tag"
 )
 
+type PerformerFinder interface {
+	FindByNames(ctx context.Context, names []string, nocase bool) ([]*models.Performer, error)
+	FindByStashID(ctx context.Context, stashID models.StashID) ([]*models.Performer, error)
+}
+
+type MovieNamesFinder interface {
+	FindByNames(ctx context.Context, names []string, nocase bool) ([]*models.Movie, error)
+}
+
 // ScrapedPerformer matches the provided performer with the
 // performers in the database and sets the ID field if one is found.
-func ScrapedPerformer(qb models.PerformerReader, p *models.ScrapedPerformer, stashBoxEndpoint *string) error {
+func ScrapedPerformer(ctx context.Context, qb PerformerFinder, p *models.ScrapedPerformer, stashBoxEndpoint *string) error {
 	if p.StoredID != nil || p.Name == nil {
 		return nil
 	}
 
 	// Check if a performer with the StashID already exists
 	if stashBoxEndpoint != nil && p.RemoteSiteID != nil {
-		performers, err := qb.FindByStashID(models.StashID{
+		performers, err := qb.FindByStashID(ctx, models.StashID{
 			StashID:  *p.RemoteSiteID,
 			Endpoint: *stashBoxEndpoint,
 		})
@@ -31,7 +41,7 @@ func ScrapedPerformer(qb models.PerformerReader, p *models.ScrapedPerformer, sta
 		}
 	}
 
-	performers, err := qb.FindByNames([]string{*p.Name}, true)
+	performers, err := qb.FindByNames(ctx, []string{*p.Name}, true)
 
 	if err != nil {
 		return err
@@ -47,16 +57,21 @@ func ScrapedPerformer(qb models.PerformerReader, p *models.ScrapedPerformer, sta
 	return nil
 }
 
+type StudioFinder interface {
+	studio.Queryer
+	FindByStashID(ctx context.Context, stashID models.StashID) ([]*models.Studio, error)
+}
+
 // ScrapedStudio matches the provided studio with the studios
 // in the database and sets the ID field if one is found.
-func ScrapedStudio(qb models.StudioReader, s *models.ScrapedStudio, stashBoxEndpoint *string) error {
+func ScrapedStudio(ctx context.Context, qb StudioFinder, s *models.ScrapedStudio, stashBoxEndpoint *string) error {
 	if s.StoredID != nil {
 		return nil
 	}
 
 	// Check if a studio with the StashID already exists
 	if stashBoxEndpoint != nil && s.RemoteSiteID != nil {
-		studios, err := qb.FindByStashID(models.StashID{
+		studios, err := qb.FindByStashID(ctx, models.StashID{
 			StashID:  *s.RemoteSiteID,
 			Endpoint: *stashBoxEndpoint,
 		})
@@ -70,7 +85,7 @@ func ScrapedStudio(qb models.StudioReader, s *models.ScrapedStudio, stashBoxEndp
 		}
 	}
 
-	st, err := studio.ByName(qb, s.Name)
+	st, err := studio.ByName(ctx, qb, s.Name)
 
 	if err != nil {
 		return err
@@ -78,7 +93,7 @@ func ScrapedStudio(qb models.StudioReader, s *models.ScrapedStudio, stashBoxEndp
 
 	if st == nil {
 		// try matching by alias
-		st, err = studio.ByAlias(qb, s.Name)
+		st, err = studio.ByAlias(ctx, qb, s.Name)
 		if err != nil {
 			return err
 		}
@@ -96,12 +111,12 @@ func ScrapedStudio(qb models.StudioReader, s *models.ScrapedStudio, stashBoxEndp
 
 // ScrapedMovie matches the provided movie with the movies
 // in the database and sets the ID field if one is found.
-func ScrapedMovie(qb models.MovieReader, m *models.ScrapedMovie) error {
+func ScrapedMovie(ctx context.Context, qb MovieNamesFinder, m *models.ScrapedMovie) error {
 	if m.StoredID != nil || m.Name == nil {
 		return nil
 	}
 
-	movies, err := qb.FindByNames([]string{*m.Name}, true)
+	movies, err := qb.FindByNames(ctx, []string{*m.Name}, true)
 
 	if err != nil {
 		return err
@@ -119,12 +134,12 @@ func ScrapedMovie(qb models.MovieReader, m *models.ScrapedMovie) error {
 
 // ScrapedTag matches the provided tag with the tags
 // in the database and sets the ID field if one is found.
-func ScrapedTag(qb models.TagReader, s *models.ScrapedTag) error {
+func ScrapedTag(ctx context.Context, qb tag.Queryer, s *models.ScrapedTag) error {
 	if s.StoredID != nil {
 		return nil
 	}
 
-	t, err := tag.ByName(qb, s.Name)
+	t, err := tag.ByName(ctx, qb, s.Name)
 
 	if err != nil {
 		return err
@@ -132,7 +147,7 @@ func ScrapedTag(qb models.TagReader, s *models.ScrapedTag) error {
 
 	if t == nil {
 		// try matching by alias
-		t, err = tag.ByAlias(qb, s.Name)
+		t, err = tag.ByAlias(ctx, qb, s.Name)
 		if err != nil {
 			return err
 		}
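A sketch of how a caller drives the new scraped-matching shape; `qb` is a hypothetical value satisfying `match.PerformerFinder`, and the endpoint URL is illustrative only:

```go
// matchScraped resolves a scraped performer to a stored one, preferring a
// stash-box StashID match before falling back to a case-insensitive name match.
func matchScraped(ctx context.Context, qb match.PerformerFinder, p *models.ScrapedPerformer) error {
	endpoint := "https://stashbox.example/graphql" // hypothetical endpoint
	if err := match.ScrapedPerformer(ctx, qb, p, &endpoint); err != nil {
		return err
	}
	// p.StoredID is populated when a match was found; nil means no match
	return nil
}
```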
diff --git a/pkg/models/date.go b/pkg/models/date.go
new file mode 100644
index 000000000..5fbb8f5bf
--- /dev/null
+++ b/pkg/models/date.go
@@ -0,0 +1,19 @@
+package models
+
+import "time"
+
+// Date wraps a time.Time with a format of "YYYY-MM-DD"
+type Date struct {
+	time.Time
+}
+
+const dateFormat = "2006-01-02"
+
+func (d Date) String() string {
+	return d.Format(dateFormat)
+}
+
+func NewDate(s string) Date {
+	t, _ := time.Parse(dateFormat, s)
+	return Date{t}
+}
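A small sketch of the `Date` wrapper's round-trip behaviour. Note that `NewDate` discards the parse error, so a malformed string silently yields the zero time; callers that need validation should parse first:

```go
func dateExample() {
	d := models.NewDate("2022-08-14")
	fmt.Println(d) // "2022-08-14", via the String() override

	bad := models.NewDate("not-a-date")
	fmt.Println(bad.IsZero()) // true - the parse failure was swallowed
}
```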
diff --git a/pkg/models/extension_resolution.go b/pkg/models/extension_resolution.go
deleted file mode 100644
index a52d4a784..000000000
--- a/pkg/models/extension_resolution.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package models
-
-type ResolutionRange struct {
-	min, max int
-}
-
-var resolutionRanges = map[ResolutionEnum]ResolutionRange{
-	ResolutionEnumVeryLow:    {144, 239},
-	ResolutionEnumLow:        {240, 359},
-	ResolutionEnumR360p:      {360, 479},
-	ResolutionEnumStandard:   {480, 539},
-	ResolutionEnumWebHd:      {540, 719},
-	ResolutionEnumStandardHd: {720, 1079},
-	ResolutionEnumFullHd:     {1080, 1439},
-	ResolutionEnumQuadHd:     {1440, 1919},
-	ResolutionEnumVrHd:       {1920, 2159},
-	ResolutionEnumFourK:      {2160, 2879},
-	ResolutionEnumFiveK:      {2880, 3383},
-	ResolutionEnumSixK:       {3384, 4319},
-	ResolutionEnumEightK:     {4320, 8639},
-}
-
-// GetMaxResolution returns the maximum width or height that media must be
-// to qualify as this resolution.
-func (r *ResolutionEnum) GetMaxResolution() int {
-	return resolutionRanges[*r].max
-}
-
-// GetMinResolution returns the minimum width or height that media must be
-// to qualify as this resolution.
-func (r ResolutionEnum) GetMinResolution() int {
-	return resolutionRanges[r].min
-}
-
-var streamingResolutionMax = map[StreamingResolutionEnum]int{
-	StreamingResolutionEnumLow:        resolutionRanges[ResolutionEnumLow].min,
-	StreamingResolutionEnumStandard:   resolutionRanges[ResolutionEnumStandard].min,
-	StreamingResolutionEnumStandardHd: resolutionRanges[ResolutionEnumStandardHd].min,
-	StreamingResolutionEnumFullHd:     resolutionRanges[ResolutionEnumFullHd].min,
-	StreamingResolutionEnumFourK:      resolutionRanges[ResolutionEnumFourK].min,
-	StreamingResolutionEnumOriginal:   0,
-}
-
-func (r StreamingResolutionEnum) GetMaxResolution() int {
-	return streamingResolutionMax[r]
-}
diff --git a/pkg/models/file.go b/pkg/models/file.go
new file mode 100644
index 000000000..827a55d5c
--- /dev/null
+++ b/pkg/models/file.go
@@ -0,0 +1,80 @@
+package models
+
+import (
+	"context"
+	"path/filepath"
+	"strings"
+
+	"github.com/stashapp/stash/pkg/file"
+)
+
+type FileQueryOptions struct {
+	QueryOptions
+	FileFilter *FileFilterType
+}
+
+type FileFilterType struct {
+	And *FileFilterType `json:"AND"`
+	Or  *FileFilterType `json:"OR"`
+	Not *FileFilterType `json:"NOT"`
+
+	// Filter by path
+	Path *StringCriterionInput `json:"path"`
+}
+
+func PathsFileFilter(paths []string) *FileFilterType {
+	if paths == nil {
+		return nil
+	}
+
+	sep := string(filepath.Separator)
+
+	var ret *FileFilterType
+	var or *FileFilterType
+	for _, p := range paths {
+		newOr := &FileFilterType{}
+		if or != nil {
+			or.Or = newOr
+		} else {
+			ret = newOr
+		}
+
+		or = newOr
+
+		if !strings.HasSuffix(p, sep) {
+			p += sep
+		}
+
+		or.Path = &StringCriterionInput{
+			Modifier: CriterionModifierEquals,
+			Value:    p + "%",
+		}
+	}
+
+	return ret
+}
+
+type FileQueryResult struct {
+	// can't use QueryResult because id type is wrong
+
+	IDs   []file.ID
+	Count int
+
+	finder     file.Finder
+	files      []file.File
+	resolveErr error
+}
+
+func NewFileQueryResult(finder file.Finder) *FileQueryResult {
+	return &FileQueryResult{
+		finder: finder,
+	}
+}
+
+func (r *FileQueryResult) Resolve(ctx context.Context) ([]file.File, error) {
+	// cache results
+	if r.files == nil && r.resolveErr == nil {
+		r.files, r.resolveErr = r.finder.Find(ctx, r.IDs...)
+	}
+	return r.files, r.resolveErr
+}
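`PathsFileFilter` chains each path into a linked OR criterion, appending the separator (when missing) and a `%` wildcard so each clause is a prefix match. A sketch of what it builds:

```go
func pathsFilterExample() {
	f := models.PathsFileFilter([]string{"/data/a", "/data/b"})
	// f is equivalent to:
	//   path EQUALS "/data/a/%"  OR  path EQUALS "/data/b/%"
	// i.e. files under either directory, matched by path prefix.
	fmt.Println(f.Path.Value, f.Or.Path.Value) // "/data/a/%" "/data/b/%"
}
```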
diff --git a/pkg/models/filter.go b/pkg/models/filter.go
new file mode 100644
index 000000000..57bee72df
--- /dev/null
+++ b/pkg/models/filter.go
@@ -0,0 +1,108 @@
+package models
+
+import (
+	"fmt"
+	"io"
+	"strconv"
+)
+
+type CriterionModifier string
+
+const (
+	// =
+	CriterionModifierEquals CriterionModifier = "EQUALS"
+	// !=
+	CriterionModifierNotEquals CriterionModifier = "NOT_EQUALS"
+	// >
+	CriterionModifierGreaterThan CriterionModifier = "GREATER_THAN"
+	// <
+	CriterionModifierLessThan CriterionModifier = "LESS_THAN"
+	// IS NULL
+	CriterionModifierIsNull CriterionModifier = "IS_NULL"
+	// IS NOT NULL
+	CriterionModifierNotNull CriterionModifier = "NOT_NULL"
+	// INCLUDES ALL
+	CriterionModifierIncludesAll CriterionModifier = "INCLUDES_ALL"
+	CriterionModifierIncludes    CriterionModifier = "INCLUDES"
+	CriterionModifierExcludes    CriterionModifier = "EXCLUDES"
+	// MATCHES REGEX
+	CriterionModifierMatchesRegex CriterionModifier = "MATCHES_REGEX"
+	// NOT MATCHES REGEX
+	CriterionModifierNotMatchesRegex CriterionModifier = "NOT_MATCHES_REGEX"
+	// >= AND <=
+	CriterionModifierBetween CriterionModifier = "BETWEEN"
+	// < OR >
+	CriterionModifierNotBetween CriterionModifier = "NOT_BETWEEN"
+)
+
+var AllCriterionModifier = []CriterionModifier{
+	CriterionModifierEquals,
+	CriterionModifierNotEquals,
+	CriterionModifierGreaterThan,
+	CriterionModifierLessThan,
+	CriterionModifierIsNull,
+	CriterionModifierNotNull,
+	CriterionModifierIncludesAll,
+	CriterionModifierIncludes,
+	CriterionModifierExcludes,
+	CriterionModifierMatchesRegex,
+	CriterionModifierNotMatchesRegex,
+	CriterionModifierBetween,
+	CriterionModifierNotBetween,
+}
+
+func (e CriterionModifier) IsValid() bool {
+	switch e {
+	case CriterionModifierEquals, CriterionModifierNotEquals, CriterionModifierGreaterThan, CriterionModifierLessThan, CriterionModifierIsNull, CriterionModifierNotNull, CriterionModifierIncludesAll, CriterionModifierIncludes, CriterionModifierExcludes, CriterionModifierMatchesRegex, CriterionModifierNotMatchesRegex, CriterionModifierBetween, CriterionModifierNotBetween:
+		return true
+	}
+	return false
+}
+
+func (e CriterionModifier) String() string {
+	return string(e)
+}
+
+func (e *CriterionModifier) UnmarshalGQL(v interface{}) error {
+	str, ok := v.(string)
+	if !ok {
+		return fmt.Errorf("enums must be strings")
+	}
+
+	*e = CriterionModifier(str)
+	if !e.IsValid() {
+		return fmt.Errorf("%s is not a valid CriterionModifier", str)
+	}
+	return nil
+}
+
+func (e CriterionModifier) MarshalGQL(w io.Writer) {
+	fmt.Fprint(w, strconv.Quote(e.String()))
+}
+
+type StringCriterionInput struct {
+	Value    string            `json:"value"`
+	Modifier CriterionModifier `json:"modifier"`
+}
+
+type IntCriterionInput struct {
+	Value    int               `json:"value"`
+	Value2   *int              `json:"value2"`
+	Modifier CriterionModifier `json:"modifier"`
+}
+
+type ResolutionCriterionInput struct {
+	Value    ResolutionEnum    `json:"value"`
+	Modifier CriterionModifier `json:"modifier"`
+}
+
+type HierarchicalMultiCriterionInput struct {
+	Value    []string          `json:"value"`
+	Modifier CriterionModifier `json:"modifier"`
+	Depth    *int              `json:"depth"`
+}
+
+type MultiCriterionInput struct {
+	Value    []string          `json:"value"`
+	Modifier CriterionModifier `json:"modifier"`
+}
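A sketch of the gqlgen scalar contract these enums implement: `UnmarshalGQL` accepts only known string values, and `MarshalGQL` writes the quoted string form:

```go
func modifierExample() error {
	var m models.CriterionModifier
	if err := m.UnmarshalGQL("MATCHES_REGEX"); err != nil {
		return err // unknown values are rejected here
	}

	var buf bytes.Buffer
	m.MarshalGQL(&buf)
	fmt.Println(buf.String()) // "MATCHES_REGEX" (with surrounding quotes)
	return nil
}
```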
diff --git a/pkg/models/extension_find_filter.go b/pkg/models/find_filter.go
similarity index 57%
rename from pkg/models/extension_find_filter.go
rename to pkg/models/find_filter.go
index 1a6fb15ed..f684ca065 100644
--- a/pkg/models/extension_find_filter.go
+++ b/pkg/models/find_filter.go
@@ -1,9 +1,65 @@
 package models
 
+import (
+	"fmt"
+	"io"
+	"strconv"
+)
+
 // PerPageAll is the value used for perPage to indicate all results should be
 // returned.
 const PerPageAll = -1
 
+type SortDirectionEnum string
+
+const (
+	SortDirectionEnumAsc  SortDirectionEnum = "ASC"
+	SortDirectionEnumDesc SortDirectionEnum = "DESC"
+)
+
+var AllSortDirectionEnum = []SortDirectionEnum{
+	SortDirectionEnumAsc,
+	SortDirectionEnumDesc,
+}
+
+func (e SortDirectionEnum) IsValid() bool {
+	switch e {
+	case SortDirectionEnumAsc, SortDirectionEnumDesc:
+		return true
+	}
+	return false
+}
+
+func (e SortDirectionEnum) String() string {
+	return string(e)
+}
+
+func (e *SortDirectionEnum) UnmarshalGQL(v interface{}) error {
+	str, ok := v.(string)
+	if !ok {
+		return fmt.Errorf("enums must be strings")
+	}
+
+	*e = SortDirectionEnum(str)
+	if !e.IsValid() {
+		return fmt.Errorf("%s is not a valid SortDirectionEnum", str)
+	}
+	return nil
+}
+
+func (e SortDirectionEnum) MarshalGQL(w io.Writer) {
+	fmt.Fprint(w, strconv.Quote(e.String()))
+}
+
+type FindFilterType struct {
+	Q    *string `json:"q"`
+	Page *int    `json:"page"`
+	// use per_page = -1 to indicate all results. Defaults to 25.
+	PerPage   *int               `json:"per_page"`
+	Sort      *string            `json:"sort"`
+	Direction *SortDirectionEnum `json:"direction"`
+}
+
 func (ff FindFilterType) GetSort(defaultSort string) string {
 	var sort string
 	if ff.Sort == nil {
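A small sketch of `FindFilterType`'s defaulting behaviour (the body of `GetSort` continues unchanged beyond the hunk shown above):

```go
func findFilterExample() {
	pp := 40
	ff := models.FindFilterType{PerPage: &pp}

	// Sort is nil, so the supplied default is returned
	fmt.Println(ff.GetSort("title")) // "title"
}
```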
diff --git a/pkg/models/gallery.go b/pkg/models/gallery.go
index 75fcfc896..ed8e078dd 100644
--- a/pkg/models/gallery.go
+++ b/pkg/models/gallery.go
@@ -1,33 +1,109 @@
 package models
 
+import (
+	"context"
+
+	"github.com/stashapp/stash/pkg/file"
+)
+
+type GalleryFilterType struct {
+	And     *GalleryFilterType    `json:"AND"`
+	Or      *GalleryFilterType    `json:"OR"`
+	Not     *GalleryFilterType    `json:"NOT"`
+	Title   *StringCriterionInput `json:"title"`
+	Details *StringCriterionInput `json:"details"`
+	// Filter by file checksum
+	Checksum *StringCriterionInput `json:"checksum"`
+	// Filter by path
+	Path *StringCriterionInput `json:"path"`
+	// Filter by zip file count
+	FileCount *IntCriterionInput `json:"file_count"`
+	// Filter to only include galleries missing this property
+	IsMissing *string `json:"is_missing"`
+	// Filter to include/exclude galleries that were created from zip
+	IsZip *bool `json:"is_zip"`
+	// Filter by rating
+	Rating *IntCriterionInput `json:"rating"`
+	// Filter by organized
+	Organized *bool `json:"organized"`
+	// Filter by average image resolution
+	AverageResolution *ResolutionCriterionInput `json:"average_resolution"`
+	// Filter to only include galleries with this studio
+	Studios *HierarchicalMultiCriterionInput `json:"studios"`
+	// Filter to only include galleries with these tags
+	Tags *HierarchicalMultiCriterionInput `json:"tags"`
+	// Filter by tag count
+	TagCount *IntCriterionInput `json:"tag_count"`
+	// Filter to only include galleries with performers with these tags
+	PerformerTags *HierarchicalMultiCriterionInput `json:"performer_tags"`
+	// Filter to only include galleries with these performers
+	Performers *MultiCriterionInput `json:"performers"`
+	// Filter by performer count
+	PerformerCount *IntCriterionInput `json:"performer_count"`
+	// Filter galleries that have performers that have been favorited
+	PerformerFavorite *bool `json:"performer_favorite"`
+	// Filter galleries by performer age at time of gallery
+	PerformerAge *IntCriterionInput `json:"performer_age"`
+	// Filter by number of images in this gallery
+	ImageCount *IntCriterionInput `json:"image_count"`
+	// Filter by url
+	URL *StringCriterionInput `json:"url"`
+}
+
+type GalleryUpdateInput struct {
+	ClientMutationID *string  `json:"clientMutationId"`
+	ID               string   `json:"id"`
+	Title            *string  `json:"title"`
+	URL              *string  `json:"url"`
+	Date             *string  `json:"date"`
+	Details          *string  `json:"details"`
+	Rating           *int     `json:"rating"`
+	Organized        *bool    `json:"organized"`
+	SceneIds         []string `json:"scene_ids"`
+	StudioID         *string  `json:"studio_id"`
+	TagIds           []string `json:"tag_ids"`
+	PerformerIds     []string `json:"performer_ids"`
+}
+
+type GalleryDestroyInput struct {
+	Ids []string `json:"ids"`
+	// If true, then the zip file will be deleted if the gallery is zip-file-based.
+	// If gallery is folder-based, then any files not associated with other
+	// galleries will be deleted, along with the folder, if it is not empty.
+	DeleteFile      *bool `json:"delete_file"`
+	DeleteGenerated *bool `json:"delete_generated"`
+}
+
+type GalleryFinder interface {
+	FindMany(ctx context.Context, ids []int) ([]*Gallery, error)
+}
+
 type GalleryReader interface {
-	Find(id int) (*Gallery, error)
-	FindMany(ids []int) ([]*Gallery, error)
-	FindByChecksum(checksum string) (*Gallery, error)
-	FindByChecksums(checksums []string) ([]*Gallery, error)
-	FindByPath(path string) (*Gallery, error)
-	FindBySceneID(sceneID int) ([]*Gallery, error)
-	FindByImageID(imageID int) ([]*Gallery, error)
-	Count() (int, error)
-	All() ([]*Gallery, error)
-	Query(galleryFilter *GalleryFilterType, findFilter *FindFilterType) ([]*Gallery, int, error)
-	QueryCount(galleryFilter *GalleryFilterType, findFilter *FindFilterType) (int, error)
-	GetPerformerIDs(galleryID int) ([]int, error)
-	GetTagIDs(galleryID int) ([]int, error)
-	GetSceneIDs(galleryID int) ([]int, error)
-	GetImageIDs(galleryID int) ([]int, error)
+	Find(ctx context.Context, id int) (*Gallery, error)
+	GalleryFinder
+	FindByChecksum(ctx context.Context, checksum string) ([]*Gallery, error)
+	FindByChecksums(ctx context.Context, checksums []string) ([]*Gallery, error)
+	FindByPath(ctx context.Context, path string) ([]*Gallery, error)
+	FindBySceneID(ctx context.Context, sceneID int) ([]*Gallery, error)
+	FindByImageID(ctx context.Context, imageID int) ([]*Gallery, error)
+
+	SceneIDLoader
+	PerformerIDLoader
+	TagIDLoader
+
+	Count(ctx context.Context) (int, error)
+	All(ctx context.Context) ([]*Gallery, error)
+	Query(ctx context.Context, galleryFilter *GalleryFilterType, findFilter *FindFilterType) ([]*Gallery, int, error)
+	QueryCount(ctx context.Context, galleryFilter *GalleryFilterType, findFilter *FindFilterType) (int, error)
+	GetImageIDs(ctx context.Context, galleryID int) ([]int, error)
 }
 
 type GalleryWriter interface {
-	Create(newGallery Gallery) (*Gallery, error)
-	Update(updatedGallery Gallery) (*Gallery, error)
-	UpdatePartial(updatedGallery GalleryPartial) (*Gallery, error)
-	UpdateFileModTime(id int, modTime NullSQLiteTimestamp) error
-	Destroy(id int) error
-	UpdatePerformers(galleryID int, performerIDs []int) error
-	UpdateTags(galleryID int, tagIDs []int) error
-	UpdateScenes(galleryID int, sceneIDs []int) error
-	UpdateImages(galleryID int, imageIDs []int) error
+	Create(ctx context.Context, newGallery *Gallery, fileIDs []file.ID) error
+	Update(ctx context.Context, updatedGallery *Gallery) error
+	UpdatePartial(ctx context.Context, id int, updatedGallery GalleryPartial) (*Gallery, error)
+	Destroy(ctx context.Context, id int) error
+	UpdateImages(ctx context.Context, galleryID int, imageIDs []int) error
 }
 
 type GalleryReaderWriter interface {
diff --git a/pkg/models/generate.go b/pkg/models/generate.go
new file mode 100644
index 000000000..85685e078
--- /dev/null
+++ b/pkg/models/generate.go
@@ -0,0 +1,91 @@
+package models
+
+import (
+	"fmt"
+	"io"
+	"strconv"
+)
+
+type GenerateMetadataOptions struct {
+	Sprites                   *bool                   `json:"sprites"`
+	Previews                  *bool                   `json:"previews"`
+	ImagePreviews             *bool                   `json:"imagePreviews"`
+	PreviewOptions            *GeneratePreviewOptions `json:"previewOptions"`
+	Markers                   *bool                   `json:"markers"`
+	MarkerImagePreviews       *bool                   `json:"markerImagePreviews"`
+	MarkerScreenshots         *bool                   `json:"markerScreenshots"`
+	Transcodes                *bool                   `json:"transcodes"`
+	Phashes                   *bool                   `json:"phashes"`
+	InteractiveHeatmapsSpeeds *bool                   `json:"interactiveHeatmapsSpeeds"`
+}
+
+type GeneratePreviewOptions struct {
+	// Number of segments in a preview file
+	PreviewSegments *int `json:"previewSegments"`
+	// Preview segment duration, in seconds
+	PreviewSegmentDuration *float64 `json:"previewSegmentDuration"`
+	// Duration of start of video to exclude when generating previews
+	PreviewExcludeStart *string `json:"previewExcludeStart"`
+	// Duration of end of video to exclude when generating previews
+	PreviewExcludeEnd *string `json:"previewExcludeEnd"`
+	// Preset when generating preview
+	PreviewPreset *PreviewPreset `json:"previewPreset"`
+}
+
+type PreviewPreset string
+
+const (
+	// X264_ULTRAFAST
+	PreviewPresetUltrafast PreviewPreset = "ultrafast"
+	// X264_VERYFAST
+	PreviewPresetVeryfast PreviewPreset = "veryfast"
+	// X264_FAST
+	PreviewPresetFast PreviewPreset = "fast"
+	// X264_MEDIUM
+	PreviewPresetMedium PreviewPreset = "medium"
+	// X264_SLOW
+	PreviewPresetSlow PreviewPreset = "slow"
+	// X264_SLOWER
+	PreviewPresetSlower PreviewPreset = "slower"
+	// X264_VERYSLOW
+	PreviewPresetVeryslow PreviewPreset = "veryslow"
+)
+
+var AllPreviewPreset = []PreviewPreset{
+	PreviewPresetUltrafast,
+	PreviewPresetVeryfast,
+	PreviewPresetFast,
+	PreviewPresetMedium,
+	PreviewPresetSlow,
+	PreviewPresetSlower,
+	PreviewPresetVeryslow,
+}
+
+func (e PreviewPreset) IsValid() bool {
+	switch e {
+	case PreviewPresetUltrafast, PreviewPresetVeryfast, PreviewPresetFast, PreviewPresetMedium, PreviewPresetSlow, PreviewPresetSlower, PreviewPresetVeryslow:
+		return true
+	}
+	return false
+}
+
+func (e PreviewPreset) String() string {
+	return string(e)
+}
+
+func (e *PreviewPreset) UnmarshalGQL(v interface{}) error {
+	str, ok := v.(string)
+	if !ok {
+		return fmt.Errorf("enums must be strings")
+	}
+
+	*e = PreviewPreset(str)
+	if !e.IsValid() {
+		return fmt.Errorf("%s is not a valid PreviewPreset", str)
+	}
+	return nil
+}
+
+func (e PreviewPreset) MarshalGQL(w io.Writer) {
+	fmt.Fprint(w, strconv.Quote(e.String()))
+}
diff --git a/pkg/models/image.go b/pkg/models/image.go
index bae3c043f..9ded5939e 100644
--- a/pkg/models/image.go
+++ b/pkg/models/image.go
@@ -1,5 +1,58 @@
 package models
 
+import "context"
+
+type ImageFilterType struct {
+	And   *ImageFilterType      `json:"AND"`
+	Or    *ImageFilterType      `json:"OR"`
+	Not   *ImageFilterType      `json:"NOT"`
+	Title *StringCriterionInput `json:"title"`
+	// Filter by file checksum
+	Checksum *StringCriterionInput `json:"checksum"`
+	// Filter by path
+	Path *StringCriterionInput `json:"path"`
+	// Filter by file count
+	FileCount *IntCriterionInput `json:"file_count"`
+	// Filter by rating
+	Rating *IntCriterionInput `json:"rating"`
+	// Filter by organized
+	Organized *bool `json:"organized"`
+	// Filter by o-counter
+	OCounter *IntCriterionInput `json:"o_counter"`
+	// Filter by resolution
+	Resolution *ResolutionCriterionInput `json:"resolution"`
+	// Filter to only include images missing this property
+	IsMissing *string `json:"is_missing"`
+	// Filter to only include images with this studio
+	Studios *HierarchicalMultiCriterionInput `json:"studios"`
+	// Filter to only include images with these tags
+	Tags *HierarchicalMultiCriterionInput `json:"tags"`
+	// Filter by tag count
+	TagCount *IntCriterionInput `json:"tag_count"`
+	// Filter to only include images with performers with these tags
+	PerformerTags *HierarchicalMultiCriterionInput `json:"performer_tags"`
+	// Filter to only include images with these performers
+	Performers *MultiCriterionInput `json:"performers"`
+	// Filter by performer count
+	PerformerCount *IntCriterionInput `json:"performer_count"`
+	// Filter images that have performers that have been favorited
+	PerformerFavorite *bool `json:"performer_favorite"`
+	// Filter to only include images with these galleries
+	Galleries *MultiCriterionInput `json:"galleries"`
+}
+
+type ImageDestroyInput struct {
+	ID              string `json:"id"`
+	DeleteFile      *bool  `json:"delete_file"`
+	DeleteGenerated *bool  `json:"delete_generated"`
+}
+
+type ImagesDestroyInput struct {
+	Ids             []string `json:"ids"`
+	DeleteFile      *bool    `json:"delete_file"`
+	DeleteGenerated *bool    `json:"delete_generated"`
+}
+
 type ImageQueryOptions struct {
 	QueryOptions
 	ImageFilter *ImageFilterType
@@ -24,54 +77,45 @@ func NewImageQueryResult(finder ImageFinder) *ImageQueryResult {
 	}
 }
 
-func (r *ImageQueryResult) Resolve() ([]*Image, error) {
+func (r *ImageQueryResult) Resolve(ctx context.Context) ([]*Image, error) {
 	// cache results
 	if r.images == nil && r.resolveErr == nil {
-		r.images, r.resolveErr = r.finder.FindMany(r.IDs)
+		r.images, r.resolveErr = r.finder.FindMany(ctx, r.IDs)
 	}
 	return r.images, r.resolveErr
 }
 
 type ImageFinder interface {
 	// TODO - rename to Find and remove existing method
-	FindMany(ids []int) ([]*Image, error)
+	FindMany(ctx context.Context, ids []int) ([]*Image, error)
 }
 
 type ImageReader interface {
 	ImageFinder
 	// TODO - remove this in another PR
-	Find(id int) (*Image, error)
-	FindByChecksum(checksum string) (*Image, error)
-	FindByGalleryID(galleryID int) ([]*Image, error)
-	CountByGalleryID(galleryID int) (int, error)
-	FindByPath(path string) (*Image, error)
-	// FindByPerformerID(performerID int) ([]*Image, error)
-	// CountByPerformerID(performerID int) (int, error)
-	// FindByStudioID(studioID int) ([]*Image, error)
-	Count() (int, error)
-	Size() (float64, error)
-	// SizeCount() (string, error)
-	// CountByStudioID(studioID int) (int, error)
-	// CountByTagID(tagID int) (int, error)
-	All() ([]*Image, error)
-	Query(options ImageQueryOptions) (*ImageQueryResult, error)
-	QueryCount(imageFilter *ImageFilterType, findFilter *FindFilterType) (int, error)
-	GetGalleryIDs(imageID int) ([]int, error)
-	GetTagIDs(imageID int) ([]int, error)
-	GetPerformerIDs(imageID int) ([]int, error)
+	Find(ctx context.Context, id int) (*Image, error)
+	FindByChecksum(ctx context.Context, checksum string) ([]*Image, error)
+	FindByGalleryID(ctx context.Context, galleryID int) ([]*Image, error)
+	CountByGalleryID(ctx context.Context, galleryID int) (int, error)
+	Count(ctx context.Context) (int, error)
+	Size(ctx context.Context) (float64, error)
+	All(ctx context.Context) ([]*Image, error)
+	Query(ctx context.Context, options ImageQueryOptions) (*ImageQueryResult, error)
+	QueryCount(ctx context.Context, imageFilter *ImageFilterType, findFilter *FindFilterType) (int, error)
+
+	GalleryIDLoader
+	PerformerIDLoader
+	TagIDLoader
 }
 
 type ImageWriter interface {
-	Create(newImage Image) (*Image, error)
-	Update(updatedImage ImagePartial) (*Image, error)
-	UpdateFull(updatedImage Image) (*Image, error)
-	IncrementOCounter(id int) (int, error)
-	DecrementOCounter(id int) (int, error)
-	ResetOCounter(id int) (int, error)
-	Destroy(id int) error
-	UpdateGalleries(imageID int, galleryIDs []int) error
-	UpdatePerformers(imageID int, performerIDs []int) error
-	UpdateTags(imageID int, tagIDs []int) error
+	Create(ctx context.Context, newImage *ImageCreateInput) error
+	Update(ctx context.Context, updatedImage *Image) error
+	UpdatePartial(ctx context.Context, id int, partial ImagePartial) (*Image, error)
+	IncrementOCounter(ctx context.Context, id int) (int, error)
+	DecrementOCounter(ctx context.Context, id int) (int, error)
+	ResetOCounter(ctx context.Context, id int) (int, error)
+	Destroy(ctx context.Context, id int) error
 }
 
 type ImageReaderWriter interface {
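A sketch of the two-step query pattern this enables: `Query` returns IDs cheaply, and `Resolve` loads (and caches) the rows only when the caller actually needs them; `repo` is a hypothetical `ImageReader`:

```go
func queryImages(ctx context.Context, repo models.ImageReader, opts models.ImageQueryOptions) ([]*models.Image, error) {
	result, err := repo.Query(ctx, opts)
	if err != nil {
		return nil, err
	}
	// result.Count is available without loading the rows;
	// Resolve fetches them via FindMany and memoizes the answer
	return result.Resolve(ctx)
}
```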
diff --git a/pkg/models/import.go b/pkg/models/import.go
new file mode 100644
index 000000000..164f1b528
--- /dev/null
+++ b/pkg/models/import.go
@@ -0,0 +1,50 @@
+package models
+
+import (
+	"fmt"
+	"io"
+	"strconv"
+)
+
+type ImportMissingRefEnum string
+
+const (
+	ImportMissingRefEnumIgnore ImportMissingRefEnum = "IGNORE"
+	ImportMissingRefEnumFail   ImportMissingRefEnum = "FAIL"
+	ImportMissingRefEnumCreate ImportMissingRefEnum = "CREATE"
+)
+
+var AllImportMissingRefEnum = []ImportMissingRefEnum{
+	ImportMissingRefEnumIgnore,
+	ImportMissingRefEnumFail,
+	ImportMissingRefEnumCreate,
+}
+
+func (e ImportMissingRefEnum) IsValid() bool {
+	switch e {
+	case ImportMissingRefEnumIgnore, ImportMissingRefEnumFail, ImportMissingRefEnumCreate:
+		return true
+	}
+	return false
+}
+
+func (e ImportMissingRefEnum) String() string {
+	return string(e)
+}
+
+func (e *ImportMissingRefEnum) UnmarshalGQL(v interface{}) error {
+	str, ok := v.(string)
+	if !ok {
+		return fmt.Errorf("enums must be strings")
+	}
+
+	*e = ImportMissingRefEnum(str)
+	if !e.IsValid() {
+		return fmt.Errorf("%s is not a valid ImportMissingRefEnum", str)
+	}
+	return nil
+}
+
+func (e ImportMissingRefEnum) MarshalGQL(w io.Writer) {
+	fmt.Fprint(w, strconv.Quote(e.String()))
+}
diff --git a/pkg/models/int64.go b/pkg/models/int64.go
new file mode 100644
index 000000000..cfc557793
--- /dev/null
+++ b/pkg/models/int64.go
@@ -0,0 +1,39 @@
+package models
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/99designs/gqlgen/graphql"
+	"github.com/stashapp/stash/pkg/logger"
+)
+
+var ErrInt64 = errors.New("cannot parse Int64")
+
+func MarshalInt64(v int64) graphql.Marshaler {
+	return graphql.WriterFunc(func(w io.Writer) {
+		_, err := io.WriteString(w, strconv.FormatInt(v, 10))
+		if err != nil {
+			logger.Warnf("could not marshal int64: %v", err)
+		}
+	})
+}
+
+func UnmarshalInt64(v interface{}) (int64, error) {
+	if tmpStr, ok := v.(string); ok {
+		if len(tmpStr) == 0 {
+			return 0, nil
+		}
+
+		ret, err := strconv.ParseInt(tmpStr, 10, 64)
+		if err != nil {
+			return 0, fmt.Errorf("cannot parse %v as Int64: %w", tmpStr, err)
+		}
+
+		return ret, nil
+	}
+
+	return 0, fmt.Errorf("%w: not a string", ErrInt64)
+}
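A sketch of the Int64 scalar's behaviour: values travel as strings (JSON numbers cannot represent the full int64 range losslessly), the empty string maps to zero, and non-string input fails with a wrapped `ErrInt64`:

```go
func int64Example() {
	// beyond float64's exact-integer range, hence the string representation
	v, _ := models.UnmarshalInt64("9007199254740993")
	fmt.Println(v) // 9007199254740993

	if _, err := models.UnmarshalInt64(42); err != nil {
		fmt.Println(errors.Is(err, models.ErrInt64)) // true - not a string
	}
}
```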
"github.com/json-iterator/go" + "github.com/stashapp/stash/pkg/hash/md5" + "github.com/stashapp/stash/pkg/models/json" +) + +const ( + DirEntryTypeFolder = "folder" + DirEntryTypeVideo = "video" + DirEntryTypeImage = "image" + DirEntryTypeFile = "file" +) + +type DirEntry interface { + IsFile() bool + Filename() string + DirEntry() *BaseDirEntry +} + +type BaseDirEntry struct { + ZipFile string `json:"zip_file,omitempty"` + ModTime json.JSONTime `json:"mod_time"` + + Type string `json:"type,omitempty"` + + Path string `json:"path,omitempty"` + + CreatedAt json.JSONTime `json:"created_at,omitempty"` + UpdatedAt json.JSONTime `json:"updated_at,omitempty"` +} + +func (f *BaseDirEntry) DirEntry() *BaseDirEntry { + return f +} + +func (f *BaseDirEntry) IsFile() bool { + return false +} + +func (f *BaseDirEntry) Filename() string { + // prefix with the path depth so that we can import lower-level files/folders first + depth := strings.Count(f.Path, string(filepath.Separator)) + + // hash the full path for a unique filename + hash := md5.FromString(f.Path) + + basename := filepath.Base(f.Path) + + return fmt.Sprintf("%02x.%s.%s.json", depth, basename, hash) +} + +type BaseFile struct { + BaseDirEntry + + Fingerprints []Fingerprint `json:"fingerprints,omitempty"` + Size int64 `json:"size"` +} + +func (f *BaseFile) IsFile() bool { + return true +} + +type Fingerprint struct { + Type string `json:"type,omitempty"` + Fingerprint interface{} `json:"fingerprint,omitempty"` +} + +type VideoFile struct { + *BaseFile + Format string `json:"format,omitempty"` + Width int `json:"width,omitempty"` + Height int `json:"height,omitempty"` + Duration float64 `json:"duration,omitempty"` + VideoCodec string `json:"video_codec,omitempty"` + AudioCodec string `json:"audio_codec,omitempty"` + FrameRate float64 `json:"frame_rate,omitempty"` + BitRate int64 `json:"bitrate,omitempty"` + + Interactive bool `json:"interactive,omitempty"` + InteractiveSpeed *int `json:"interactive_speed,omitempty"` +} + +type ImageFile struct { + *BaseFile + Format string `json:"format,omitempty"` + Width int `json:"width,omitempty"` + Height int `json:"height,omitempty"` +} + +func LoadFileFile(filePath string) (DirEntry, error) { + r, err := os.Open(filePath) + if err != nil { + return nil, err + } + defer r.Close() + + data, err := io.ReadAll(r) + if err != nil { + return nil, err + } + + var json = jsoniter.ConfigCompatibleWithStandardLibrary + jsonParser := json.NewDecoder(bytes.NewReader(data)) + + var bf BaseDirEntry + if err := jsonParser.Decode(&bf); err != nil { + return nil, err + } + + jsonParser = json.NewDecoder(bytes.NewReader(data)) + + switch bf.Type { + case DirEntryTypeFolder: + return &bf, nil + case DirEntryTypeVideo: + var vf VideoFile + if err := jsonParser.Decode(&vf); err != nil { + return nil, err + } + + return &vf, nil + case DirEntryTypeImage: + var imf ImageFile + if err := jsonParser.Decode(&imf); err != nil { + return nil, err + } + + return &imf, nil + case DirEntryTypeFile: + var bff BaseFile + if err := jsonParser.Decode(&bff); err != nil { + return nil, err + } + + return &bff, nil + default: + return nil, errors.New("unknown file type") + } +} + +func SaveFileFile(filePath string, file DirEntry) error { + if file == nil { + return fmt.Errorf("file must not be nil") + } + return marshalToFile(filePath, file) +} diff --git a/pkg/models/jsonschema/folder.go b/pkg/models/jsonschema/folder.go new file mode 100644 index 000000000..080a2275c --- /dev/null +++ b/pkg/models/jsonschema/folder.go @@ -0,0 +1,56 @@ 
diff --git a/pkg/models/jsonschema/folder.go b/pkg/models/jsonschema/folder.go
new file mode 100644
index 000000000..080a2275c
--- /dev/null
+++ b/pkg/models/jsonschema/folder.go
@@ -0,0 +1,56 @@
+package jsonschema
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	jsoniter "github.com/json-iterator/go"
+	"github.com/stashapp/stash/pkg/hash/md5"
+	"github.com/stashapp/stash/pkg/models/json"
+)
+
+type Folder struct {
+	BaseDirEntry
+
+	Path string `json:"path,omitempty"`
+
+	CreatedAt json.JSONTime `json:"created_at,omitempty"`
+	UpdatedAt json.JSONTime `json:"updated_at,omitempty"`
+}
+
+func (f *Folder) Filename() string {
+	// prefix with the path depth so that we can import lower-level folders first
+	depth := strings.Count(f.Path, string(filepath.Separator))
+
+	// hash the full path for a unique filename
+	hash := md5.FromString(f.Path)
+
+	basename := filepath.Base(f.Path)
+
+	return fmt.Sprintf("%2x.%s.%s.json", depth, basename, hash)
+}
+
+func LoadFolderFile(filePath string) (*Folder, error) {
+	var folder Folder
+	file, err := os.Open(filePath)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	jsonParser := json.NewDecoder(file)
+	err = jsonParser.Decode(&folder)
+	if err != nil {
+		return nil, err
+	}
+	return &folder, nil
+}
+
+func SaveFolderFile(filePath string, folder *Folder) error {
+	if folder == nil {
+		return fmt.Errorf("folder must not be nil")
+	}
+	return marshalToFile(filePath, folder)
+}
diff --git a/pkg/models/jsonschema/gallery.go b/pkg/models/jsonschema/gallery.go
index 6885f001f..596e7c610 100644
--- a/pkg/models/jsonschema/gallery.go
+++ b/pkg/models/jsonschema/gallery.go
@@ -3,27 +3,38 @@ package jsonschema
 import (
 	"fmt"
 	"os"
+	"strings"
 
 	jsoniter "github.com/json-iterator/go"
+	"github.com/stashapp/stash/pkg/fsutil"
 	"github.com/stashapp/stash/pkg/models/json"
 )
 
 type Gallery struct {
-	Path        string        `json:"path,omitempty"`
-	Checksum    string        `json:"checksum,omitempty"`
-	Zip         bool          `json:"zip,omitempty"`
-	Title       string        `json:"title,omitempty"`
-	URL         string        `json:"url,omitempty"`
-	Date        string        `json:"date,omitempty"`
-	Details     string        `json:"details,omitempty"`
-	Rating      int           `json:"rating,omitempty"`
-	Organized   bool          `json:"organized,omitempty"`
-	Studio      string        `json:"studio,omitempty"`
-	Performers  []string      `json:"performers,omitempty"`
-	Tags        []string      `json:"tags,omitempty"`
-	FileModTime json.JSONTime `json:"file_mod_time,omitempty"`
-	CreatedAt   json.JSONTime `json:"created_at,omitempty"`
-	UpdatedAt   json.JSONTime `json:"updated_at,omitempty"`
+	ZipFiles   []string      `json:"zip_files,omitempty"`
+	FolderPath string        `json:"folder_path,omitempty"`
+	Title      string        `json:"title,omitempty"`
+	URL        string        `json:"url,omitempty"`
+	Date       string        `json:"date,omitempty"`
+	Details    string        `json:"details,omitempty"`
+	Rating     int           `json:"rating,omitempty"`
+	Organized  bool          `json:"organized,omitempty"`
+	Studio     string        `json:"studio,omitempty"`
+	Performers []string      `json:"performers,omitempty"`
+	Tags       []string      `json:"tags,omitempty"`
+	CreatedAt  json.JSONTime `json:"created_at,omitempty"`
+	UpdatedAt  json.JSONTime `json:"updated_at,omitempty"`
+}
+
+func (s Gallery) Filename(basename string, hash string) string {
+	ret := fsutil.SanitiseBasename(basename)
+
+	if ret != "" {
+		ret += "."
+	}
+	ret += hash
+
+	return ret + ".json"
 }
 
 func LoadGalleryFile(filePath string) (*Gallery, error) {
@@ -48,3 +59,23 @@ func SaveGalleryFile(filePath string, gallery *Gallery) error {
 	}
 	return marshalToFile(filePath, gallery)
 }
+
+// GalleryRef is used to identify a Gallery.
+// Only one field should be populated.
+type GalleryRef struct {
+	ZipFiles   []string `json:"zip_files,omitempty"`
+	FolderPath string   `json:"folder_path,omitempty"`
+	// Title is used only if FolderPath and ZipPaths is empty
+	Title string `json:"title,omitempty"`
+}
+
+func (r GalleryRef) String() string {
+	switch {
+	case r.FolderPath != "":
+		return "{ folder: " + r.FolderPath + " }"
+	case len(r.ZipFiles) > 0:
+		return "{ zipFiles: [" + strings.Join(r.ZipFiles, ", ") + "] }"
+	default:
+		return "{ title: " + r.Title + " }"
+	}
+}
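`GalleryRef.String` renders differently depending on which field is populated, which is what import/export logs rely on to identify a gallery:

```go
func galleryRefExample() {
	fmt.Println(jsonschema.GalleryRef{FolderPath: "/galleries/a"})   // { folder: /galleries/a }
	fmt.Println(jsonschema.GalleryRef{ZipFiles: []string{"a.zip"}})  // { zipFiles: [a.zip] }
	fmt.Println(jsonschema.GalleryRef{Title: "loose images"})        // { title: loose images }
}
```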
diff --git a/pkg/models/jsonschema/image.go b/pkg/models/jsonschema/image.go
index dc4f7f525..364daa0cf 100644
--- a/pkg/models/jsonschema/image.go
+++ b/pkg/models/jsonschema/image.go
@@ -5,31 +5,37 @@ import (
 	"os"
 
 	jsoniter "github.com/json-iterator/go"
+	"github.com/stashapp/stash/pkg/fsutil"
 	"github.com/stashapp/stash/pkg/models/json"
 )
 
-type ImageFile struct {
-	ModTime json.JSONTime `json:"mod_time,omitempty"`
-	Size    int           `json:"size"`
-	Width   int           `json:"width"`
-	Height  int           `json:"height"`
-}
-
 type Image struct {
 	Title      string        `json:"title,omitempty"`
-	Checksum   string        `json:"checksum,omitempty"`
 	Studio     string        `json:"studio,omitempty"`
 	Rating     int           `json:"rating,omitempty"`
 	Organized  bool          `json:"organized,omitempty"`
 	OCounter   int           `json:"o_counter,omitempty"`
-	Galleries  []string      `json:"galleries,omitempty"`
+	Galleries  []GalleryRef  `json:"galleries,omitempty"`
 	Performers []string      `json:"performers,omitempty"`
 	Tags       []string      `json:"tags,omitempty"`
-	File       *ImageFile    `json:"file,omitempty"`
+	Files      []string      `json:"files,omitempty"`
 	CreatedAt  json.JSONTime `json:"created_at,omitempty"`
 	UpdatedAt  json.JSONTime `json:"updated_at,omitempty"`
 }
 
+func (s Image) Filename(basename string, hash string) string {
+	ret := fsutil.SanitiseBasename(s.Title)
+	if ret == "" {
+		ret = basename
+	}
+
+	if hash != "" {
+		ret += "." + hash
+	}
+
+	return ret + ".json"
+}
+
 func LoadImageFile(filePath string) (*Image, error) {
 	var image Image
 	file, err := os.Open(filePath)
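A sketch of the export filename convention shared by the `Image`, `Scene` and `Gallery` schema types: sanitised title (falling back to the supplied basename), then the hash, then `.json`. The exact output depends on `fsutil.SanitiseBasename`'s character handling:

```go
func filenameExample() {
	img := jsonschema.Image{Title: "My Image"}
	name := img.Filename("fallback-basename", "abc123")
	fmt.Println(name) // roughly "My Image.abc123.json", after sanitisation
}
```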
diff --git a/pkg/models/jsonschema/mappings.go b/pkg/models/jsonschema/mappings.go
deleted file mode 100644
index 8f20ae755..000000000
--- a/pkg/models/jsonschema/mappings.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package jsonschema
-
-import (
-	"fmt"
-	"os"
-
-	jsoniter "github.com/json-iterator/go"
-)
-
-type PathNameMapping struct {
-	Path     string `json:"path,omitempty"`
-	Name     string `json:"name,omitempty"`
-	Checksum string `json:"checksum"`
-}
-
-type Mappings struct {
-	Tags       []PathNameMapping `json:"tags"`
-	Performers []PathNameMapping `json:"performers"`
-	Studios    []PathNameMapping `json:"studios"`
-	Movies     []PathNameMapping `json:"movies"`
-	Galleries  []PathNameMapping `json:"galleries"`
-	Scenes     []PathNameMapping `json:"scenes"`
-	Images     []PathNameMapping `json:"images"`
-}
-
-func LoadMappingsFile(filePath string) (*Mappings, error) {
-	var mappings Mappings
-	file, err := os.Open(filePath)
-	if err != nil {
-		return nil, err
-	}
-	defer file.Close()
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
-	jsonParser := json.NewDecoder(file)
-	err = jsonParser.Decode(&mappings)
-	if err != nil {
-		return nil, err
-	}
-	return &mappings, nil
-}
-
-func SaveMappingsFile(filePath string, mappings *Mappings) error {
-	if mappings == nil {
-		return fmt.Errorf("mappings must not be nil")
-	}
-	return marshalToFile(filePath, mappings)
-}
diff --git a/pkg/models/jsonschema/movie.go b/pkg/models/jsonschema/movie.go
index d4eded802..d787f8288 100644
--- a/pkg/models/jsonschema/movie.go
+++ b/pkg/models/jsonschema/movie.go
@@ -6,6 +6,7 @@ import (
 
 	jsoniter "github.com/json-iterator/go"
 
+	"github.com/stashapp/stash/pkg/fsutil"
 	"github.com/stashapp/stash/pkg/logger"
 	"github.com/stashapp/stash/pkg/models/json"
 )
@@ -26,6 +27,10 @@ type Movie struct {
 	UpdatedAt json.JSONTime `json:"updated_at,omitempty"`
 }
 
+func (s Movie) Filename() string {
+	return fsutil.SanitiseBasename(s.Name) + ".json"
+}
+
 // Backwards Compatible synopsis for the movie
 type MovieSynopsisBC struct {
 	Synopsis string `json:"sypnopsis,omitempty"`
diff --git a/pkg/models/jsonschema/performer.go b/pkg/models/jsonschema/performer.go
index 89677d715..ad33452f3 100644
--- a/pkg/models/jsonschema/performer.go
+++ b/pkg/models/jsonschema/performer.go
@@ -5,6 +5,7 @@ import (
 	"os"
 
 	jsoniter "github.com/json-iterator/go"
+	"github.com/stashapp/stash/pkg/fsutil"
 	"github.com/stashapp/stash/pkg/models"
 	"github.com/stashapp/stash/pkg/models/json"
 )
@@ -40,6 +41,10 @@ type Performer struct {
 	IgnoreAutoTag bool `json:"ignore_auto_tag,omitempty"`
 }
 
+func (s Performer) Filename() string {
+	return fsutil.SanitiseBasename(s.Name) + ".json"
+}
+
 func LoadPerformerFile(filePath string) (*Performer, error) {
 	var performer Performer
 	file, err := os.Open(filePath)
`json:"url,omitempty"` Date string `json:"date,omitempty"` @@ -48,18 +46,31 @@ type Scene struct { Organized bool `json:"organized,omitempty"` OCounter int `json:"o_counter,omitempty"` Details string `json:"details,omitempty"` - Galleries []string `json:"galleries,omitempty"` + Galleries []GalleryRef `json:"galleries,omitempty"` Performers []string `json:"performers,omitempty"` Movies []SceneMovie `json:"movies,omitempty"` Tags []string `json:"tags,omitempty"` Markers []SceneMarker `json:"markers,omitempty"` - File *SceneFile `json:"file,omitempty"` + Files []string `json:"files,omitempty"` Cover string `json:"cover,omitempty"` CreatedAt json.JSONTime `json:"created_at,omitempty"` UpdatedAt json.JSONTime `json:"updated_at,omitempty"` StashIDs []models.StashID `json:"stash_ids,omitempty"` } +func (s Scene) Filename(basename string, hash string) string { + ret := fsutil.SanitiseBasename(s.Title) + if ret == "" { + ret = basename + } + + if hash != "" { + ret += "." + hash + } + + return ret + ".json" +} + func LoadSceneFile(filePath string) (*Scene, error) { var scene Scene file, err := os.Open(filePath) diff --git a/pkg/models/jsonschema/studio.go b/pkg/models/jsonschema/studio.go index dad65a569..d6932a28c 100644 --- a/pkg/models/jsonschema/studio.go +++ b/pkg/models/jsonschema/studio.go @@ -5,6 +5,7 @@ import ( "os" jsoniter "github.com/json-iterator/go" + "github.com/stashapp/stash/pkg/fsutil" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models/json" ) @@ -23,6 +24,10 @@ type Studio struct { IgnoreAutoTag bool `json:"ignore_auto_tag,omitempty"` } +func (s Studio) Filename() string { + return fsutil.SanitiseBasename(s.Name) + ".json" +} + func LoadStudioFile(filePath string) (*Studio, error) { var studio Studio file, err := os.Open(filePath) diff --git a/pkg/models/jsonschema/tag.go b/pkg/models/jsonschema/tag.go index 6be4643bd..5f7e0bfa7 100644 --- a/pkg/models/jsonschema/tag.go +++ b/pkg/models/jsonschema/tag.go @@ -5,6 +5,7 @@ import ( "os" jsoniter "github.com/json-iterator/go" + "github.com/stashapp/stash/pkg/fsutil" "github.com/stashapp/stash/pkg/models/json" ) @@ -18,6 +19,10 @@ type Tag struct { UpdatedAt json.JSONTime `json:"updated_at,omitempty"` } +func (s Tag) Filename() string { + return fsutil.SanitiseBasename(s.Name) + ".json" +} + func LoadTagFile(filePath string) (*Tag, error) { var tag Tag file, err := os.Open(filePath) diff --git a/pkg/models/mocks/GalleryReaderWriter.go b/pkg/models/mocks/GalleryReaderWriter.go index 9731147fe..1c0ddf957 100644 --- a/pkg/models/mocks/GalleryReaderWriter.go +++ b/pkg/models/mocks/GalleryReaderWriter.go @@ -3,8 +3,12 @@ package mocks import ( - models "github.com/stashapp/stash/pkg/models" + context "context" + + file "github.com/stashapp/stash/pkg/file" mock "github.com/stretchr/testify/mock" + + models "github.com/stashapp/stash/pkg/models" ) // GalleryReaderWriter is an autogenerated mock type for the GalleryReaderWriter type @@ -12,13 +16,13 @@ type GalleryReaderWriter struct { mock.Mock } -// All provides a mock function with given fields: -func (_m *GalleryReaderWriter) All() ([]*models.Gallery, error) { - ret := _m.Called() +// All provides a mock function with given fields: ctx +func (_m *GalleryReaderWriter) All(ctx context.Context) ([]*models.Gallery, error) { + ret := _m.Called(ctx) var r0 []*models.Gallery - if rf, ok := ret.Get(0).(func() []*models.Gallery); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) []*models.Gallery); ok { + r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = 
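
Scene.Filename, added just above, is the one Filename helper that needs extra inputs: scenes are not reliably titled, so it falls back to the on-disk basename and appends a hash to keep files for different scenes distinct. A short usage sketch (the hash value here is hypothetical; the export code decides which hash, e.g. oshash or MD5, to pass):

package main

import (
	"fmt"

	"github.com/stashapp/stash/pkg/models/jsonschema"
)

func main() {
	s := jsonschema.Scene{} // untitled scene
	// No title: fall back to the provided basename, then append the hash.
	fmt.Println(s.Filename("movie001.mp4", "3f7a9c")) // movie001.mp4.3f7a9c.json

	s.Title = "Beach Day"
	fmt.Println(s.Filename("movie001.mp4", "3f7a9c")) // Beach Day.3f7a9c.json
}
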
ret.Get(0).([]*models.Gallery) @@ -26,8 +30,8 @@ func (_m *GalleryReaderWriter) All() ([]*models.Gallery, error) { } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -35,20 +39,20 @@ func (_m *GalleryReaderWriter) All() ([]*models.Gallery, error) { return r0, r1 } -// Count provides a mock function with given fields: -func (_m *GalleryReaderWriter) Count() (int, error) { - ret := _m.Called() +// Count provides a mock function with given fields: ctx +func (_m *GalleryReaderWriter) Count(ctx context.Context) (int, error) { + ret := _m.Called(ctx) var r0 int - if rf, ok := ret.Get(0).(func() int); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) int); ok { + r0 = rf(ctx) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -56,36 +60,13 @@ func (_m *GalleryReaderWriter) Count() (int, error) { return r0, r1 } -// Create provides a mock function with given fields: newGallery -func (_m *GalleryReaderWriter) Create(newGallery models.Gallery) (*models.Gallery, error) { - ret := _m.Called(newGallery) - - var r0 *models.Gallery - if rf, ok := ret.Get(0).(func(models.Gallery) *models.Gallery); ok { - r0 = rf(newGallery) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*models.Gallery) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(models.Gallery) error); ok { - r1 = rf(newGallery) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Destroy provides a mock function with given fields: id -func (_m *GalleryReaderWriter) Destroy(id int) error { - ret := _m.Called(id) +// Create provides a mock function with given fields: ctx, newGallery, fileIDs +func (_m *GalleryReaderWriter) Create(ctx context.Context, newGallery *models.Gallery, fileIDs []file.ID) error { + ret := _m.Called(ctx, newGallery, fileIDs) var r0 error - if rf, ok := ret.Get(0).(func(int) error); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(context.Context, *models.Gallery, []file.ID) error); ok { + r0 = rf(ctx, newGallery, fileIDs) } else { r0 = ret.Error(0) } @@ -93,13 +74,27 @@ func (_m *GalleryReaderWriter) Destroy(id int) error { return r0 } -// Find provides a mock function with given fields: id -func (_m *GalleryReaderWriter) Find(id int) (*models.Gallery, error) { - ret := _m.Called(id) +// Destroy provides a mock function with given fields: ctx, id +func (_m *GalleryReaderWriter) Destroy(ctx context.Context, id int) error { + ret := _m.Called(ctx, id) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int) error); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Find provides a mock function with given fields: ctx, id +func (_m *GalleryReaderWriter) Find(ctx context.Context, id int) (*models.Gallery, error) { + ret := _m.Called(ctx, id) var r0 *models.Gallery - if rf, ok := ret.Get(0).(func(int) *models.Gallery); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(context.Context, int) *models.Gallery); ok { + r0 = rf(ctx, id) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.Gallery) @@ -107,8 +102,8 @@ func (_m *GalleryReaderWriter) Find(id int) (*models.Gallery, error) { } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(id) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) } else { 
r1 = ret.Error(1) } @@ -116,36 +111,13 @@ func (_m *GalleryReaderWriter) Find(id int) (*models.Gallery, error) { return r0, r1 } -// FindByChecksum provides a mock function with given fields: checksum -func (_m *GalleryReaderWriter) FindByChecksum(checksum string) (*models.Gallery, error) { - ret := _m.Called(checksum) - - var r0 *models.Gallery - if rf, ok := ret.Get(0).(func(string) *models.Gallery); ok { - r0 = rf(checksum) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*models.Gallery) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(checksum) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FindByChecksums provides a mock function with given fields: checksums -func (_m *GalleryReaderWriter) FindByChecksums(checksums []string) ([]*models.Gallery, error) { - ret := _m.Called(checksums) +// FindByChecksum provides a mock function with given fields: ctx, checksum +func (_m *GalleryReaderWriter) FindByChecksum(ctx context.Context, checksum string) ([]*models.Gallery, error) { + ret := _m.Called(ctx, checksum) var r0 []*models.Gallery - if rf, ok := ret.Get(0).(func([]string) []*models.Gallery); ok { - r0 = rf(checksums) + if rf, ok := ret.Get(0).(func(context.Context, string) []*models.Gallery); ok { + r0 = rf(ctx, checksum) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Gallery) @@ -153,8 +125,8 @@ func (_m *GalleryReaderWriter) FindByChecksums(checksums []string) ([]*models.Ga } var r1 error - if rf, ok := ret.Get(1).(func([]string) error); ok { - r1 = rf(checksums) + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, checksum) } else { r1 = ret.Error(1) } @@ -162,13 +134,13 @@ func (_m *GalleryReaderWriter) FindByChecksums(checksums []string) ([]*models.Ga return r0, r1 } -// FindByImageID provides a mock function with given fields: imageID -func (_m *GalleryReaderWriter) FindByImageID(imageID int) ([]*models.Gallery, error) { - ret := _m.Called(imageID) +// FindByChecksums provides a mock function with given fields: ctx, checksums +func (_m *GalleryReaderWriter) FindByChecksums(ctx context.Context, checksums []string) ([]*models.Gallery, error) { + ret := _m.Called(ctx, checksums) var r0 []*models.Gallery - if rf, ok := ret.Get(0).(func(int) []*models.Gallery); ok { - r0 = rf(imageID) + if rf, ok := ret.Get(0).(func(context.Context, []string) []*models.Gallery); ok { + r0 = rf(ctx, checksums) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Gallery) @@ -176,8 +148,8 @@ func (_m *GalleryReaderWriter) FindByImageID(imageID int) ([]*models.Gallery, er } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(imageID) + if rf, ok := ret.Get(1).(func(context.Context, []string) error); ok { + r1 = rf(ctx, checksums) } else { r1 = ret.Error(1) } @@ -185,36 +157,13 @@ func (_m *GalleryReaderWriter) FindByImageID(imageID int) ([]*models.Gallery, er return r0, r1 } -// FindByPath provides a mock function with given fields: path -func (_m *GalleryReaderWriter) FindByPath(path string) (*models.Gallery, error) { - ret := _m.Called(path) - - var r0 *models.Gallery - if rf, ok := ret.Get(0).(func(string) *models.Gallery); ok { - r0 = rf(path) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*models.Gallery) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(path) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FindBySceneID provides a mock function with given fields: sceneID -func (_m *GalleryReaderWriter) 
FindBySceneID(sceneID int) ([]*models.Gallery, error) { - ret := _m.Called(sceneID) +// FindByImageID provides a mock function with given fields: ctx, imageID +func (_m *GalleryReaderWriter) FindByImageID(ctx context.Context, imageID int) ([]*models.Gallery, error) { + ret := _m.Called(ctx, imageID) var r0 []*models.Gallery - if rf, ok := ret.Get(0).(func(int) []*models.Gallery); ok { - r0 = rf(sceneID) + if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Gallery); ok { + r0 = rf(ctx, imageID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Gallery) @@ -222,8 +171,8 @@ func (_m *GalleryReaderWriter) FindBySceneID(sceneID int) ([]*models.Gallery, er } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(sceneID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, imageID) } else { r1 = ret.Error(1) } @@ -231,13 +180,13 @@ func (_m *GalleryReaderWriter) FindBySceneID(sceneID int) ([]*models.Gallery, er return r0, r1 } -// FindMany provides a mock function with given fields: ids -func (_m *GalleryReaderWriter) FindMany(ids []int) ([]*models.Gallery, error) { - ret := _m.Called(ids) +// FindByPath provides a mock function with given fields: ctx, path +func (_m *GalleryReaderWriter) FindByPath(ctx context.Context, path string) ([]*models.Gallery, error) { + ret := _m.Called(ctx, path) var r0 []*models.Gallery - if rf, ok := ret.Get(0).(func([]int) []*models.Gallery); ok { - r0 = rf(ids) + if rf, ok := ret.Get(0).(func(context.Context, string) []*models.Gallery); ok { + r0 = rf(ctx, path) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Gallery) @@ -245,8 +194,8 @@ func (_m *GalleryReaderWriter) FindMany(ids []int) ([]*models.Gallery, error) { } var r1 error - if rf, ok := ret.Get(1).(func([]int) error); ok { - r1 = rf(ids) + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, path) } else { r1 = ret.Error(1) } @@ -254,105 +203,151 @@ func (_m *GalleryReaderWriter) FindMany(ids []int) ([]*models.Gallery, error) { return r0, r1 } -// GetImageIDs provides a mock function with given fields: galleryID -func (_m *GalleryReaderWriter) GetImageIDs(galleryID int) ([]int, error) { - ret := _m.Called(galleryID) - - var r0 []int - if rf, ok := ret.Get(0).(func(int) []int); ok { - r0 = rf(galleryID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]int) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(galleryID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetPerformerIDs provides a mock function with given fields: galleryID -func (_m *GalleryReaderWriter) GetPerformerIDs(galleryID int) ([]int, error) { - ret := _m.Called(galleryID) - - var r0 []int - if rf, ok := ret.Get(0).(func(int) []int); ok { - r0 = rf(galleryID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]int) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(galleryID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetSceneIDs provides a mock function with given fields: galleryID -func (_m *GalleryReaderWriter) GetSceneIDs(galleryID int) ([]int, error) { - ret := _m.Called(galleryID) - - var r0 []int - if rf, ok := ret.Get(0).(func(int) []int); ok { - r0 = rf(galleryID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]int) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(galleryID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetTagIDs provides a mock 
function with given fields: galleryID -func (_m *GalleryReaderWriter) GetTagIDs(galleryID int) ([]int, error) { - ret := _m.Called(galleryID) - - var r0 []int - if rf, ok := ret.Get(0).(func(int) []int); ok { - r0 = rf(galleryID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]int) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(galleryID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Query provides a mock function with given fields: galleryFilter, findFilter -func (_m *GalleryReaderWriter) Query(galleryFilter *models.GalleryFilterType, findFilter *models.FindFilterType) ([]*models.Gallery, int, error) { - ret := _m.Called(galleryFilter, findFilter) +// FindBySceneID provides a mock function with given fields: ctx, sceneID +func (_m *GalleryReaderWriter) FindBySceneID(ctx context.Context, sceneID int) ([]*models.Gallery, error) { + ret := _m.Called(ctx, sceneID) var r0 []*models.Gallery - if rf, ok := ret.Get(0).(func(*models.GalleryFilterType, *models.FindFilterType) []*models.Gallery); ok { - r0 = rf(galleryFilter, findFilter) + if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Gallery); ok { + r0 = rf(ctx, sceneID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Gallery) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, sceneID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindMany provides a mock function with given fields: ctx, ids +func (_m *GalleryReaderWriter) FindMany(ctx context.Context, ids []int) ([]*models.Gallery, error) { + ret := _m.Called(ctx, ids) + + var r0 []*models.Gallery + if rf, ok := ret.Get(0).(func(context.Context, []int) []*models.Gallery); ok { + r0 = rf(ctx, ids) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Gallery) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []int) error); ok { + r1 = rf(ctx, ids) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetImageIDs provides a mock function with given fields: ctx, galleryID +func (_m *GalleryReaderWriter) GetImageIDs(ctx context.Context, galleryID int) ([]int, error) { + ret := _m.Called(ctx, galleryID) + + var r0 []int + if rf, ok := ret.Get(0).(func(context.Context, int) []int); ok { + r0 = rf(ctx, galleryID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, galleryID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPerformerIDs provides a mock function with given fields: ctx, relatedID +func (_m *GalleryReaderWriter) GetPerformerIDs(ctx context.Context, relatedID int) ([]int, error) { + ret := _m.Called(ctx, relatedID) + + var r0 []int + if rf, ok := ret.Get(0).(func(context.Context, int) []int); ok { + r0 = rf(ctx, relatedID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, relatedID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSceneIDs provides a mock function with given fields: ctx, relatedID +func (_m *GalleryReaderWriter) GetSceneIDs(ctx context.Context, relatedID int) ([]int, error) { + ret := _m.Called(ctx, relatedID) + + var r0 []int + if rf, ok := ret.Get(0).(func(context.Context, int) []int); ok { + r0 = rf(ctx, relatedID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int) + } + } + + var r1 error + 
if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, relatedID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTagIDs provides a mock function with given fields: ctx, relatedID +func (_m *GalleryReaderWriter) GetTagIDs(ctx context.Context, relatedID int) ([]int, error) { + ret := _m.Called(ctx, relatedID) + + var r0 []int + if rf, ok := ret.Get(0).(func(context.Context, int) []int); ok { + r0 = rf(ctx, relatedID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, relatedID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Query provides a mock function with given fields: ctx, galleryFilter, findFilter +func (_m *GalleryReaderWriter) Query(ctx context.Context, galleryFilter *models.GalleryFilterType, findFilter *models.FindFilterType) ([]*models.Gallery, int, error) { + ret := _m.Called(ctx, galleryFilter, findFilter) + + var r0 []*models.Gallery + if rf, ok := ret.Get(0).(func(context.Context, *models.GalleryFilterType, *models.FindFilterType) []*models.Gallery); ok { + r0 = rf(ctx, galleryFilter, findFilter) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Gallery) @@ -360,15 +355,15 @@ func (_m *GalleryReaderWriter) Query(galleryFilter *models.GalleryFilterType, fi } var r1 int - if rf, ok := ret.Get(1).(func(*models.GalleryFilterType, *models.FindFilterType) int); ok { - r1 = rf(galleryFilter, findFilter) + if rf, ok := ret.Get(1).(func(context.Context, *models.GalleryFilterType, *models.FindFilterType) int); ok { + r1 = rf(ctx, galleryFilter, findFilter) } else { r1 = ret.Get(1).(int) } var r2 error - if rf, ok := ret.Get(2).(func(*models.GalleryFilterType, *models.FindFilterType) error); ok { - r2 = rf(galleryFilter, findFilter) + if rf, ok := ret.Get(2).(func(context.Context, *models.GalleryFilterType, *models.FindFilterType) error); ok { + r2 = rf(ctx, galleryFilter, findFilter) } else { r2 = ret.Error(2) } @@ -376,20 +371,20 @@ func (_m *GalleryReaderWriter) Query(galleryFilter *models.GalleryFilterType, fi return r0, r1, r2 } -// QueryCount provides a mock function with given fields: galleryFilter, findFilter -func (_m *GalleryReaderWriter) QueryCount(galleryFilter *models.GalleryFilterType, findFilter *models.FindFilterType) (int, error) { - ret := _m.Called(galleryFilter, findFilter) +// QueryCount provides a mock function with given fields: ctx, galleryFilter, findFilter +func (_m *GalleryReaderWriter) QueryCount(ctx context.Context, galleryFilter *models.GalleryFilterType, findFilter *models.FindFilterType) (int, error) { + ret := _m.Called(ctx, galleryFilter, findFilter) var r0 int - if rf, ok := ret.Get(0).(func(*models.GalleryFilterType, *models.FindFilterType) int); ok { - r0 = rf(galleryFilter, findFilter) + if rf, ok := ret.Get(0).(func(context.Context, *models.GalleryFilterType, *models.FindFilterType) int); ok { + r0 = rf(ctx, galleryFilter, findFilter) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func(*models.GalleryFilterType, *models.FindFilterType) error); ok { - r1 = rf(galleryFilter, findFilter) + if rf, ok := ret.Get(1).(func(context.Context, *models.GalleryFilterType, *models.FindFilterType) error); ok { + r1 = rf(ctx, galleryFilter, findFilter) } else { r1 = ret.Error(1) } @@ -397,13 +392,41 @@ func (_m *GalleryReaderWriter) QueryCount(galleryFilter *models.GalleryFilterTyp return r0, r1 } -// Update provides a mock function with given fields: 
updatedGallery -func (_m *GalleryReaderWriter) Update(updatedGallery models.Gallery) (*models.Gallery, error) { - ret := _m.Called(updatedGallery) +// Update provides a mock function with given fields: ctx, updatedGallery +func (_m *GalleryReaderWriter) Update(ctx context.Context, updatedGallery *models.Gallery) error { + ret := _m.Called(ctx, updatedGallery) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *models.Gallery) error); ok { + r0 = rf(ctx, updatedGallery) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateImages provides a mock function with given fields: ctx, galleryID, imageIDs +func (_m *GalleryReaderWriter) UpdateImages(ctx context.Context, galleryID int, imageIDs []int) error { + ret := _m.Called(ctx, galleryID, imageIDs) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int, []int) error); ok { + r0 = rf(ctx, galleryID, imageIDs) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdatePartial provides a mock function with given fields: ctx, id, updatedGallery +func (_m *GalleryReaderWriter) UpdatePartial(ctx context.Context, id int, updatedGallery models.GalleryPartial) (*models.Gallery, error) { + ret := _m.Called(ctx, id, updatedGallery) var r0 *models.Gallery - if rf, ok := ret.Get(0).(func(models.Gallery) *models.Gallery); ok { - r0 = rf(updatedGallery) + if rf, ok := ret.Get(0).(func(context.Context, int, models.GalleryPartial) *models.Gallery); ok { + r0 = rf(ctx, id, updatedGallery) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.Gallery) @@ -411,104 +434,11 @@ func (_m *GalleryReaderWriter) Update(updatedGallery models.Gallery) (*models.Ga } var r1 error - if rf, ok := ret.Get(1).(func(models.Gallery) error); ok { - r1 = rf(updatedGallery) + if rf, ok := ret.Get(1).(func(context.Context, int, models.GalleryPartial) error); ok { + r1 = rf(ctx, id, updatedGallery) } else { r1 = ret.Error(1) } return r0, r1 } - -// UpdateFileModTime provides a mock function with given fields: id, modTime -func (_m *GalleryReaderWriter) UpdateFileModTime(id int, modTime models.NullSQLiteTimestamp) error { - ret := _m.Called(id, modTime) - - var r0 error - if rf, ok := ret.Get(0).(func(int, models.NullSQLiteTimestamp) error); ok { - r0 = rf(id, modTime) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UpdateImages provides a mock function with given fields: galleryID, imageIDs -func (_m *GalleryReaderWriter) UpdateImages(galleryID int, imageIDs []int) error { - ret := _m.Called(galleryID, imageIDs) - - var r0 error - if rf, ok := ret.Get(0).(func(int, []int) error); ok { - r0 = rf(galleryID, imageIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UpdatePartial provides a mock function with given fields: updatedGallery -func (_m *GalleryReaderWriter) UpdatePartial(updatedGallery models.GalleryPartial) (*models.Gallery, error) { - ret := _m.Called(updatedGallery) - - var r0 *models.Gallery - if rf, ok := ret.Get(0).(func(models.GalleryPartial) *models.Gallery); ok { - r0 = rf(updatedGallery) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*models.Gallery) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(models.GalleryPartial) error); ok { - r1 = rf(updatedGallery) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// UpdatePerformers provides a mock function with given fields: galleryID, performerIDs -func (_m *GalleryReaderWriter) UpdatePerformers(galleryID int, performerIDs []int) error { - ret := _m.Called(galleryID, performerIDs) - - var r0 error - if rf, ok := 
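
The rest of this mock, like the others below, changes mechanically: every method gains a leading context.Context, so testify expectations must match the extra argument. A hedged sketch of test usage against the regenerated mock (the ID field on models.Gallery is assumed from the wider codebase):

package example

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"

	"github.com/stashapp/stash/pkg/models"
	"github.com/stashapp/stash/pkg/models/mocks"
)

func TestFindGallery(t *testing.T) {
	rw := &mocks.GalleryReaderWriter{}

	// Expectations now take the context as the first argument;
	// mock.Anything keeps the test agnostic to the concrete context.
	rw.On("Find", mock.Anything, 42).Return(&models.Gallery{ID: 42}, nil)

	g, err := rw.Find(context.Background(), 42)
	if err != nil || g == nil || g.ID != 42 {
		t.Fatalf("unexpected result: %+v, %v", g, err)
	}

	rw.AssertExpectations(t)
}
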
ret.Get(0).(func(int, []int) error); ok { - r0 = rf(galleryID, performerIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UpdateScenes provides a mock function with given fields: galleryID, sceneIDs -func (_m *GalleryReaderWriter) UpdateScenes(galleryID int, sceneIDs []int) error { - ret := _m.Called(galleryID, sceneIDs) - - var r0 error - if rf, ok := ret.Get(0).(func(int, []int) error); ok { - r0 = rf(galleryID, sceneIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UpdateTags provides a mock function with given fields: galleryID, tagIDs -func (_m *GalleryReaderWriter) UpdateTags(galleryID int, tagIDs []int) error { - ret := _m.Called(galleryID, tagIDs) - - var r0 error - if rf, ok := ret.Get(0).(func(int, []int) error); ok { - r0 = rf(galleryID, tagIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/pkg/models/mocks/ImageReaderWriter.go b/pkg/models/mocks/ImageReaderWriter.go index 5a13ad986..41468ceb2 100644 --- a/pkg/models/mocks/ImageReaderWriter.go +++ b/pkg/models/mocks/ImageReaderWriter.go @@ -3,6 +3,8 @@ package mocks import ( + context "context" + models "github.com/stashapp/stash/pkg/models" mock "github.com/stretchr/testify/mock" ) @@ -12,13 +14,13 @@ type ImageReaderWriter struct { mock.Mock } -// All provides a mock function with given fields: -func (_m *ImageReaderWriter) All() ([]*models.Image, error) { - ret := _m.Called() +// All provides a mock function with given fields: ctx +func (_m *ImageReaderWriter) All(ctx context.Context) ([]*models.Image, error) { + ret := _m.Called(ctx) var r0 []*models.Image - if rf, ok := ret.Get(0).(func() []*models.Image); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) []*models.Image); ok { + r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Image) @@ -26,8 +28,8 @@ func (_m *ImageReaderWriter) All() ([]*models.Image, error) { } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -35,20 +37,20 @@ func (_m *ImageReaderWriter) All() ([]*models.Image, error) { return r0, r1 } -// Count provides a mock function with given fields: -func (_m *ImageReaderWriter) Count() (int, error) { - ret := _m.Called() +// Count provides a mock function with given fields: ctx +func (_m *ImageReaderWriter) Count(ctx context.Context) (int, error) { + ret := _m.Called(ctx) var r0 int - if rf, ok := ret.Get(0).(func() int); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) int); ok { + r0 = rf(ctx) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -56,20 +58,20 @@ func (_m *ImageReaderWriter) Count() (int, error) { return r0, r1 } -// CountByGalleryID provides a mock function with given fields: galleryID -func (_m *ImageReaderWriter) CountByGalleryID(galleryID int) (int, error) { - ret := _m.Called(galleryID) +// CountByGalleryID provides a mock function with given fields: ctx, galleryID +func (_m *ImageReaderWriter) CountByGalleryID(ctx context.Context, galleryID int) (int, error) { + ret := _m.Called(ctx, galleryID) var r0 int - if rf, ok := ret.Get(0).(func(int) int); ok { - r0 = rf(galleryID) + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, galleryID) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func(int) error); 
ok { - r1 = rf(galleryID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, galleryID) } else { r1 = ret.Error(1) } @@ -77,57 +79,13 @@ func (_m *ImageReaderWriter) CountByGalleryID(galleryID int) (int, error) { return r0, r1 } -// Create provides a mock function with given fields: newImage -func (_m *ImageReaderWriter) Create(newImage models.Image) (*models.Image, error) { - ret := _m.Called(newImage) - - var r0 *models.Image - if rf, ok := ret.Get(0).(func(models.Image) *models.Image); ok { - r0 = rf(newImage) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*models.Image) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(models.Image) error); ok { - r1 = rf(newImage) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DecrementOCounter provides a mock function with given fields: id -func (_m *ImageReaderWriter) DecrementOCounter(id int) (int, error) { - ret := _m.Called(id) - - var r0 int - if rf, ok := ret.Get(0).(func(int) int); ok { - r0 = rf(id) - } else { - r0 = ret.Get(0).(int) - } - - var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(id) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Destroy provides a mock function with given fields: id -func (_m *ImageReaderWriter) Destroy(id int) error { - ret := _m.Called(id) +// Create provides a mock function with given fields: ctx, newImage +func (_m *ImageReaderWriter) Create(ctx context.Context, newImage *models.ImageCreateInput) error { + ret := _m.Called(ctx, newImage) var r0 error - if rf, ok := ret.Get(0).(func(int) error); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(context.Context, *models.ImageCreateInput) error); ok { + r0 = rf(ctx, newImage) } else { r0 = ret.Error(0) } @@ -135,204 +93,20 @@ func (_m *ImageReaderWriter) Destroy(id int) error { return r0 } -// Find provides a mock function with given fields: id -func (_m *ImageReaderWriter) Find(id int) (*models.Image, error) { - ret := _m.Called(id) - - var r0 *models.Image - if rf, ok := ret.Get(0).(func(int) *models.Image); ok { - r0 = rf(id) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*models.Image) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(id) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FindByChecksum provides a mock function with given fields: checksum -func (_m *ImageReaderWriter) FindByChecksum(checksum string) (*models.Image, error) { - ret := _m.Called(checksum) - - var r0 *models.Image - if rf, ok := ret.Get(0).(func(string) *models.Image); ok { - r0 = rf(checksum) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*models.Image) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(checksum) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FindByGalleryID provides a mock function with given fields: galleryID -func (_m *ImageReaderWriter) FindByGalleryID(galleryID int) ([]*models.Image, error) { - ret := _m.Called(galleryID) - - var r0 []*models.Image - if rf, ok := ret.Get(0).(func(int) []*models.Image); ok { - r0 = rf(galleryID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*models.Image) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(galleryID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FindByPath provides a mock function with given fields: path -func (_m *ImageReaderWriter) FindByPath(path string) (*models.Image, error) { - ret := _m.Called(path) - - var r0 *models.Image - if rf, 
ok := ret.Get(0).(func(string) *models.Image); ok { - r0 = rf(path) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*models.Image) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(path) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FindMany provides a mock function with given fields: ids -func (_m *ImageReaderWriter) FindMany(ids []int) ([]*models.Image, error) { - ret := _m.Called(ids) - - var r0 []*models.Image - if rf, ok := ret.Get(0).(func([]int) []*models.Image); ok { - r0 = rf(ids) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*models.Image) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func([]int) error); ok { - r1 = rf(ids) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetGalleryIDs provides a mock function with given fields: imageID -func (_m *ImageReaderWriter) GetGalleryIDs(imageID int) ([]int, error) { - ret := _m.Called(imageID) - - var r0 []int - if rf, ok := ret.Get(0).(func(int) []int); ok { - r0 = rf(imageID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]int) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(imageID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetPerformerIDs provides a mock function with given fields: imageID -func (_m *ImageReaderWriter) GetPerformerIDs(imageID int) ([]int, error) { - ret := _m.Called(imageID) - - var r0 []int - if rf, ok := ret.Get(0).(func(int) []int); ok { - r0 = rf(imageID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]int) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(imageID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetTagIDs provides a mock function with given fields: imageID -func (_m *ImageReaderWriter) GetTagIDs(imageID int) ([]int, error) { - ret := _m.Called(imageID) - - var r0 []int - if rf, ok := ret.Get(0).(func(int) []int); ok { - r0 = rf(imageID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]int) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(imageID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// IncrementOCounter provides a mock function with given fields: id -func (_m *ImageReaderWriter) IncrementOCounter(id int) (int, error) { - ret := _m.Called(id) +// DecrementOCounter provides a mock function with given fields: ctx, id +func (_m *ImageReaderWriter) DecrementOCounter(ctx context.Context, id int) (int, error) { + ret := _m.Called(ctx, id) var r0 int - if rf, ok := ret.Get(0).(func(int) int); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, id) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(id) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) } else { r1 = ret.Error(1) } @@ -340,13 +114,209 @@ func (_m *ImageReaderWriter) IncrementOCounter(id int) (int, error) { return r0, r1 } -// Query provides a mock function with given fields: options -func (_m *ImageReaderWriter) Query(options models.ImageQueryOptions) (*models.ImageQueryResult, error) { - ret := _m.Called(options) +// Destroy provides a mock function with given fields: ctx, id +func (_m *ImageReaderWriter) Destroy(ctx context.Context, id int) error { + ret := _m.Called(ctx, id) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int) error); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Error(0) + } + + return r0 
+} + +// Find provides a mock function with given fields: ctx, id +func (_m *ImageReaderWriter) Find(ctx context.Context, id int) (*models.Image, error) { + ret := _m.Called(ctx, id) + + var r0 *models.Image + if rf, ok := ret.Get(0).(func(context.Context, int) *models.Image); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.Image) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindByChecksum provides a mock function with given fields: ctx, checksum +func (_m *ImageReaderWriter) FindByChecksum(ctx context.Context, checksum string) ([]*models.Image, error) { + ret := _m.Called(ctx, checksum) + + var r0 []*models.Image + if rf, ok := ret.Get(0).(func(context.Context, string) []*models.Image); ok { + r0 = rf(ctx, checksum) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Image) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, checksum) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindByGalleryID provides a mock function with given fields: ctx, galleryID +func (_m *ImageReaderWriter) FindByGalleryID(ctx context.Context, galleryID int) ([]*models.Image, error) { + ret := _m.Called(ctx, galleryID) + + var r0 []*models.Image + if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Image); ok { + r0 = rf(ctx, galleryID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Image) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, galleryID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindMany provides a mock function with given fields: ctx, ids +func (_m *ImageReaderWriter) FindMany(ctx context.Context, ids []int) ([]*models.Image, error) { + ret := _m.Called(ctx, ids) + + var r0 []*models.Image + if rf, ok := ret.Get(0).(func(context.Context, []int) []*models.Image); ok { + r0 = rf(ctx, ids) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Image) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []int) error); ok { + r1 = rf(ctx, ids) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetGalleryIDs provides a mock function with given fields: ctx, relatedID +func (_m *ImageReaderWriter) GetGalleryIDs(ctx context.Context, relatedID int) ([]int, error) { + ret := _m.Called(ctx, relatedID) + + var r0 []int + if rf, ok := ret.Get(0).(func(context.Context, int) []int); ok { + r0 = rf(ctx, relatedID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, relatedID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPerformerIDs provides a mock function with given fields: ctx, relatedID +func (_m *ImageReaderWriter) GetPerformerIDs(ctx context.Context, relatedID int) ([]int, error) { + ret := _m.Called(ctx, relatedID) + + var r0 []int + if rf, ok := ret.Get(0).(func(context.Context, int) []int); ok { + r0 = rf(ctx, relatedID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, relatedID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTagIDs provides a mock function with given fields: ctx, relatedID +func (_m *ImageReaderWriter) GetTagIDs(ctx 
context.Context, relatedID int) ([]int, error) { + ret := _m.Called(ctx, relatedID) + + var r0 []int + if rf, ok := ret.Get(0).(func(context.Context, int) []int); ok { + r0 = rf(ctx, relatedID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, relatedID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IncrementOCounter provides a mock function with given fields: ctx, id +func (_m *ImageReaderWriter) IncrementOCounter(ctx context.Context, id int) (int, error) { + ret := _m.Called(ctx, id) + + var r0 int + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Query provides a mock function with given fields: ctx, options +func (_m *ImageReaderWriter) Query(ctx context.Context, options models.ImageQueryOptions) (*models.ImageQueryResult, error) { + ret := _m.Called(ctx, options) var r0 *models.ImageQueryResult - if rf, ok := ret.Get(0).(func(models.ImageQueryOptions) *models.ImageQueryResult); ok { - r0 = rf(options) + if rf, ok := ret.Get(0).(func(context.Context, models.ImageQueryOptions) *models.ImageQueryResult); ok { + r0 = rf(ctx, options) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.ImageQueryResult) @@ -354,8 +324,8 @@ func (_m *ImageReaderWriter) Query(options models.ImageQueryOptions) (*models.Im } var r1 error - if rf, ok := ret.Get(1).(func(models.ImageQueryOptions) error); ok { - r1 = rf(options) + if rf, ok := ret.Get(1).(func(context.Context, models.ImageQueryOptions) error); ok { + r1 = rf(ctx, options) } else { r1 = ret.Error(1) } @@ -363,20 +333,20 @@ func (_m *ImageReaderWriter) Query(options models.ImageQueryOptions) (*models.Im return r0, r1 } -// QueryCount provides a mock function with given fields: imageFilter, findFilter -func (_m *ImageReaderWriter) QueryCount(imageFilter *models.ImageFilterType, findFilter *models.FindFilterType) (int, error) { - ret := _m.Called(imageFilter, findFilter) +// QueryCount provides a mock function with given fields: ctx, imageFilter, findFilter +func (_m *ImageReaderWriter) QueryCount(ctx context.Context, imageFilter *models.ImageFilterType, findFilter *models.FindFilterType) (int, error) { + ret := _m.Called(ctx, imageFilter, findFilter) var r0 int - if rf, ok := ret.Get(0).(func(*models.ImageFilterType, *models.FindFilterType) int); ok { - r0 = rf(imageFilter, findFilter) + if rf, ok := ret.Get(0).(func(context.Context, *models.ImageFilterType, *models.FindFilterType) int); ok { + r0 = rf(ctx, imageFilter, findFilter) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func(*models.ImageFilterType, *models.FindFilterType) error); ok { - r1 = rf(imageFilter, findFilter) + if rf, ok := ret.Get(1).(func(context.Context, *models.ImageFilterType, *models.FindFilterType) error); ok { + r1 = rf(ctx, imageFilter, findFilter) } else { r1 = ret.Error(1) } @@ -384,20 +354,20 @@ func (_m *ImageReaderWriter) QueryCount(imageFilter *models.ImageFilterType, fin return r0, r1 } -// ResetOCounter provides a mock function with given fields: id -func (_m *ImageReaderWriter) ResetOCounter(id int) (int, error) { - ret := _m.Called(id) +// ResetOCounter provides a mock function with given fields: ctx, id +func (_m *ImageReaderWriter) ResetOCounter(ctx 
context.Context, id int) (int, error) { + ret := _m.Called(ctx, id) var r0 int - if rf, ok := ret.Get(0).(func(int) int); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, id) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(id) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) } else { r1 = ret.Error(1) } @@ -405,20 +375,20 @@ func (_m *ImageReaderWriter) ResetOCounter(id int) (int, error) { return r0, r1 } -// Size provides a mock function with given fields: -func (_m *ImageReaderWriter) Size() (float64, error) { - ret := _m.Called() +// Size provides a mock function with given fields: ctx +func (_m *ImageReaderWriter) Size(ctx context.Context) (float64, error) { + ret := _m.Called(ctx) var r0 float64 - if rf, ok := ret.Get(0).(func() float64); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) float64); ok { + r0 = rf(ctx) } else { r0 = ret.Get(0).(float64) } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -426,13 +396,27 @@ func (_m *ImageReaderWriter) Size() (float64, error) { return r0, r1 } -// Update provides a mock function with given fields: updatedImage -func (_m *ImageReaderWriter) Update(updatedImage models.ImagePartial) (*models.Image, error) { - ret := _m.Called(updatedImage) +// Update provides a mock function with given fields: ctx, updatedImage +func (_m *ImageReaderWriter) Update(ctx context.Context, updatedImage *models.Image) error { + ret := _m.Called(ctx, updatedImage) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *models.Image) error); ok { + r0 = rf(ctx, updatedImage) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdatePartial provides a mock function with given fields: ctx, id, partial +func (_m *ImageReaderWriter) UpdatePartial(ctx context.Context, id int, partial models.ImagePartial) (*models.Image, error) { + ret := _m.Called(ctx, id, partial) var r0 *models.Image - if rf, ok := ret.Get(0).(func(models.ImagePartial) *models.Image); ok { - r0 = rf(updatedImage) + if rf, ok := ret.Get(0).(func(context.Context, int, models.ImagePartial) *models.Image); ok { + r0 = rf(ctx, id, partial) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.Image) @@ -440,76 +424,11 @@ func (_m *ImageReaderWriter) Update(updatedImage models.ImagePartial) (*models.I } var r1 error - if rf, ok := ret.Get(1).(func(models.ImagePartial) error); ok { - r1 = rf(updatedImage) + if rf, ok := ret.Get(1).(func(context.Context, int, models.ImagePartial) error); ok { + r1 = rf(ctx, id, partial) } else { r1 = ret.Error(1) } return r0, r1 } - -// UpdateFull provides a mock function with given fields: updatedImage -func (_m *ImageReaderWriter) UpdateFull(updatedImage models.Image) (*models.Image, error) { - ret := _m.Called(updatedImage) - - var r0 *models.Image - if rf, ok := ret.Get(0).(func(models.Image) *models.Image); ok { - r0 = rf(updatedImage) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*models.Image) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(models.Image) error); ok { - r1 = rf(updatedImage) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// UpdateGalleries provides a mock function with given fields: imageID, galleryIDs -func (_m *ImageReaderWriter) UpdateGalleries(imageID int, galleryIDs []int) error { - ret := _m.Called(imageID, 
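
Beyond the context threading, the image mock reflects two interface changes visible above: Create now takes a *models.ImageCreateInput and returns only an error, and UpdatePartial takes the id separately from the partial. A brief sketch of stubbing the new Create (ImageCreateInput's fields are not shown in this diff, so the zero value stands in):

package example

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"

	"github.com/stashapp/stash/pkg/models"
	"github.com/stashapp/stash/pkg/models/mocks"
)

func TestCreateImage(t *testing.T) {
	rw := &mocks.ImageReaderWriter{}
	input := &models.ImageCreateInput{}

	// Create no longer returns the created image; only the error is stubbed.
	rw.On("Create", mock.Anything, input).Return(nil)

	if err := rw.Create(context.Background(), input); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	rw.AssertExpectations(t)
}
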
galleryIDs) - - var r0 error - if rf, ok := ret.Get(0).(func(int, []int) error); ok { - r0 = rf(imageID, galleryIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UpdatePerformers provides a mock function with given fields: imageID, performerIDs -func (_m *ImageReaderWriter) UpdatePerformers(imageID int, performerIDs []int) error { - ret := _m.Called(imageID, performerIDs) - - var r0 error - if rf, ok := ret.Get(0).(func(int, []int) error); ok { - r0 = rf(imageID, performerIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UpdateTags provides a mock function with given fields: imageID, tagIDs -func (_m *ImageReaderWriter) UpdateTags(imageID int, tagIDs []int) error { - ret := _m.Called(imageID, tagIDs) - - var r0 error - if rf, ok := ret.Get(0).(func(int, []int) error); ok { - r0 = rf(imageID, tagIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/pkg/models/mocks/MovieReaderWriter.go b/pkg/models/mocks/MovieReaderWriter.go index 288eb6fd4..c125fc7b1 100644 --- a/pkg/models/mocks/MovieReaderWriter.go +++ b/pkg/models/mocks/MovieReaderWriter.go @@ -3,6 +3,8 @@ package mocks import ( + context "context" + models "github.com/stashapp/stash/pkg/models" mock "github.com/stretchr/testify/mock" ) @@ -12,13 +14,13 @@ type MovieReaderWriter struct { mock.Mock } -// All provides a mock function with given fields: -func (_m *MovieReaderWriter) All() ([]*models.Movie, error) { - ret := _m.Called() +// All provides a mock function with given fields: ctx +func (_m *MovieReaderWriter) All(ctx context.Context) ([]*models.Movie, error) { + ret := _m.Called(ctx) var r0 []*models.Movie - if rf, ok := ret.Get(0).(func() []*models.Movie); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) []*models.Movie); ok { + r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Movie) @@ -26,8 +28,8 @@ func (_m *MovieReaderWriter) All() ([]*models.Movie, error) { } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -35,20 +37,20 @@ func (_m *MovieReaderWriter) All() ([]*models.Movie, error) { return r0, r1 } -// Count provides a mock function with given fields: -func (_m *MovieReaderWriter) Count() (int, error) { - ret := _m.Called() +// Count provides a mock function with given fields: ctx +func (_m *MovieReaderWriter) Count(ctx context.Context) (int, error) { + ret := _m.Called(ctx) var r0 int - if rf, ok := ret.Get(0).(func() int); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) int); ok { + r0 = rf(ctx) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -56,20 +58,20 @@ func (_m *MovieReaderWriter) Count() (int, error) { return r0, r1 } -// CountByPerformerID provides a mock function with given fields: performerID -func (_m *MovieReaderWriter) CountByPerformerID(performerID int) (int, error) { - ret := _m.Called(performerID) +// CountByPerformerID provides a mock function with given fields: ctx, performerID +func (_m *MovieReaderWriter) CountByPerformerID(ctx context.Context, performerID int) (int, error) { + ret := _m.Called(ctx, performerID) var r0 int - if rf, ok := ret.Get(0).(func(int) int); ok { - r0 = rf(performerID) + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, performerID) } else { r0 = 
ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(performerID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, performerID) } else { r1 = ret.Error(1) } @@ -77,20 +79,20 @@ func (_m *MovieReaderWriter) CountByPerformerID(performerID int) (int, error) { return r0, r1 } -// CountByStudioID provides a mock function with given fields: studioID -func (_m *MovieReaderWriter) CountByStudioID(studioID int) (int, error) { - ret := _m.Called(studioID) +// CountByStudioID provides a mock function with given fields: ctx, studioID +func (_m *MovieReaderWriter) CountByStudioID(ctx context.Context, studioID int) (int, error) { + ret := _m.Called(ctx, studioID) var r0 int - if rf, ok := ret.Get(0).(func(int) int); ok { - r0 = rf(studioID) + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, studioID) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(studioID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, studioID) } else { r1 = ret.Error(1) } @@ -98,13 +100,13 @@ func (_m *MovieReaderWriter) CountByStudioID(studioID int) (int, error) { return r0, r1 } -// Create provides a mock function with given fields: newMovie -func (_m *MovieReaderWriter) Create(newMovie models.Movie) (*models.Movie, error) { - ret := _m.Called(newMovie) +// Create provides a mock function with given fields: ctx, newMovie +func (_m *MovieReaderWriter) Create(ctx context.Context, newMovie models.Movie) (*models.Movie, error) { + ret := _m.Called(ctx, newMovie) var r0 *models.Movie - if rf, ok := ret.Get(0).(func(models.Movie) *models.Movie); ok { - r0 = rf(newMovie) + if rf, ok := ret.Get(0).(func(context.Context, models.Movie) *models.Movie); ok { + r0 = rf(ctx, newMovie) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.Movie) @@ -112,8 +114,8 @@ func (_m *MovieReaderWriter) Create(newMovie models.Movie) (*models.Movie, error } var r1 error - if rf, ok := ret.Get(1).(func(models.Movie) error); ok { - r1 = rf(newMovie) + if rf, ok := ret.Get(1).(func(context.Context, models.Movie) error); ok { + r1 = rf(ctx, newMovie) } else { r1 = ret.Error(1) } @@ -121,13 +123,13 @@ func (_m *MovieReaderWriter) Create(newMovie models.Movie) (*models.Movie, error return r0, r1 } -// Destroy provides a mock function with given fields: id -func (_m *MovieReaderWriter) Destroy(id int) error { - ret := _m.Called(id) +// Destroy provides a mock function with given fields: ctx, id +func (_m *MovieReaderWriter) Destroy(ctx context.Context, id int) error { + ret := _m.Called(ctx, id) var r0 error - if rf, ok := ret.Get(0).(func(int) error); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(context.Context, int) error); ok { + r0 = rf(ctx, id) } else { r0 = ret.Error(0) } @@ -135,13 +137,13 @@ func (_m *MovieReaderWriter) Destroy(id int) error { return r0 } -// DestroyImages provides a mock function with given fields: movieID -func (_m *MovieReaderWriter) DestroyImages(movieID int) error { - ret := _m.Called(movieID) +// DestroyImages provides a mock function with given fields: ctx, movieID +func (_m *MovieReaderWriter) DestroyImages(ctx context.Context, movieID int) error { + ret := _m.Called(ctx, movieID) var r0 error - if rf, ok := ret.Get(0).(func(int) error); ok { - r0 = rf(movieID) + if rf, ok := ret.Get(0).(func(context.Context, int) error); ok { + r0 = rf(ctx, movieID) } else { r0 = ret.Error(0) } @@ -149,13 +151,13 @@ func (_m *MovieReaderWriter) 
DestroyImages(movieID int) error { return r0 } -// Find provides a mock function with given fields: id -func (_m *MovieReaderWriter) Find(id int) (*models.Movie, error) { - ret := _m.Called(id) +// Find provides a mock function with given fields: ctx, id +func (_m *MovieReaderWriter) Find(ctx context.Context, id int) (*models.Movie, error) { + ret := _m.Called(ctx, id) var r0 *models.Movie - if rf, ok := ret.Get(0).(func(int) *models.Movie); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(context.Context, int) *models.Movie); ok { + r0 = rf(ctx, id) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.Movie) @@ -163,8 +165,8 @@ func (_m *MovieReaderWriter) Find(id int) (*models.Movie, error) { } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(id) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) } else { r1 = ret.Error(1) } @@ -172,13 +174,13 @@ func (_m *MovieReaderWriter) Find(id int) (*models.Movie, error) { return r0, r1 } -// FindByName provides a mock function with given fields: name, nocase -func (_m *MovieReaderWriter) FindByName(name string, nocase bool) (*models.Movie, error) { - ret := _m.Called(name, nocase) +// FindByName provides a mock function with given fields: ctx, name, nocase +func (_m *MovieReaderWriter) FindByName(ctx context.Context, name string, nocase bool) (*models.Movie, error) { + ret := _m.Called(ctx, name, nocase) var r0 *models.Movie - if rf, ok := ret.Get(0).(func(string, bool) *models.Movie); ok { - r0 = rf(name, nocase) + if rf, ok := ret.Get(0).(func(context.Context, string, bool) *models.Movie); ok { + r0 = rf(ctx, name, nocase) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.Movie) @@ -186,8 +188,8 @@ func (_m *MovieReaderWriter) FindByName(name string, nocase bool) (*models.Movie } var r1 error - if rf, ok := ret.Get(1).(func(string, bool) error); ok { - r1 = rf(name, nocase) + if rf, ok := ret.Get(1).(func(context.Context, string, bool) error); ok { + r1 = rf(ctx, name, nocase) } else { r1 = ret.Error(1) } @@ -195,13 +197,13 @@ func (_m *MovieReaderWriter) FindByName(name string, nocase bool) (*models.Movie return r0, r1 } -// FindByNames provides a mock function with given fields: names, nocase -func (_m *MovieReaderWriter) FindByNames(names []string, nocase bool) ([]*models.Movie, error) { - ret := _m.Called(names, nocase) +// FindByNames provides a mock function with given fields: ctx, names, nocase +func (_m *MovieReaderWriter) FindByNames(ctx context.Context, names []string, nocase bool) ([]*models.Movie, error) { + ret := _m.Called(ctx, names, nocase) var r0 []*models.Movie - if rf, ok := ret.Get(0).(func([]string, bool) []*models.Movie); ok { - r0 = rf(names, nocase) + if rf, ok := ret.Get(0).(func(context.Context, []string, bool) []*models.Movie); ok { + r0 = rf(ctx, names, nocase) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Movie) @@ -209,8 +211,8 @@ func (_m *MovieReaderWriter) FindByNames(names []string, nocase bool) ([]*models } var r1 error - if rf, ok := ret.Get(1).(func([]string, bool) error); ok { - r1 = rf(names, nocase) + if rf, ok := ret.Get(1).(func(context.Context, []string, bool) error); ok { + r1 = rf(ctx, names, nocase) } else { r1 = ret.Error(1) } @@ -218,13 +220,13 @@ func (_m *MovieReaderWriter) FindByNames(names []string, nocase bool) ([]*models return r0, r1 } -// FindByPerformerID provides a mock function with given fields: performerID -func (_m *MovieReaderWriter) FindByPerformerID(performerID int) ([]*models.Movie, error) { - ret 
:= _m.Called(performerID) +// FindByPerformerID provides a mock function with given fields: ctx, performerID +func (_m *MovieReaderWriter) FindByPerformerID(ctx context.Context, performerID int) ([]*models.Movie, error) { + ret := _m.Called(ctx, performerID) var r0 []*models.Movie - if rf, ok := ret.Get(0).(func(int) []*models.Movie); ok { - r0 = rf(performerID) + if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Movie); ok { + r0 = rf(ctx, performerID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Movie) @@ -232,8 +234,8 @@ func (_m *MovieReaderWriter) FindByPerformerID(performerID int) ([]*models.Movie } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(performerID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, performerID) } else { r1 = ret.Error(1) } @@ -241,13 +243,13 @@ func (_m *MovieReaderWriter) FindByPerformerID(performerID int) ([]*models.Movie return r0, r1 } -// FindByStudioID provides a mock function with given fields: studioID -func (_m *MovieReaderWriter) FindByStudioID(studioID int) ([]*models.Movie, error) { - ret := _m.Called(studioID) +// FindByStudioID provides a mock function with given fields: ctx, studioID +func (_m *MovieReaderWriter) FindByStudioID(ctx context.Context, studioID int) ([]*models.Movie, error) { + ret := _m.Called(ctx, studioID) var r0 []*models.Movie - if rf, ok := ret.Get(0).(func(int) []*models.Movie); ok { - r0 = rf(studioID) + if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Movie); ok { + r0 = rf(ctx, studioID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Movie) @@ -255,8 +257,8 @@ func (_m *MovieReaderWriter) FindByStudioID(studioID int) ([]*models.Movie, erro } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(studioID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, studioID) } else { r1 = ret.Error(1) } @@ -264,13 +266,13 @@ func (_m *MovieReaderWriter) FindByStudioID(studioID int) ([]*models.Movie, erro return r0, r1 } -// FindMany provides a mock function with given fields: ids -func (_m *MovieReaderWriter) FindMany(ids []int) ([]*models.Movie, error) { - ret := _m.Called(ids) +// FindMany provides a mock function with given fields: ctx, ids +func (_m *MovieReaderWriter) FindMany(ctx context.Context, ids []int) ([]*models.Movie, error) { + ret := _m.Called(ctx, ids) var r0 []*models.Movie - if rf, ok := ret.Get(0).(func([]int) []*models.Movie); ok { - r0 = rf(ids) + if rf, ok := ret.Get(0).(func(context.Context, []int) []*models.Movie); ok { + r0 = rf(ctx, ids) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Movie) @@ -278,8 +280,8 @@ func (_m *MovieReaderWriter) FindMany(ids []int) ([]*models.Movie, error) { } var r1 error - if rf, ok := ret.Get(1).(func([]int) error); ok { - r1 = rf(ids) + if rf, ok := ret.Get(1).(func(context.Context, []int) error); ok { + r1 = rf(ctx, ids) } else { r1 = ret.Error(1) } @@ -287,13 +289,13 @@ func (_m *MovieReaderWriter) FindMany(ids []int) ([]*models.Movie, error) { return r0, r1 } -// GetBackImage provides a mock function with given fields: movieID -func (_m *MovieReaderWriter) GetBackImage(movieID int) ([]byte, error) { - ret := _m.Called(movieID) +// GetBackImage provides a mock function with given fields: ctx, movieID +func (_m *MovieReaderWriter) GetBackImage(ctx context.Context, movieID int) ([]byte, error) { + ret := _m.Called(ctx, movieID) var r0 []byte - if rf, ok := ret.Get(0).(func(int) []byte); ok { - r0 = 
rf(movieID) + if rf, ok := ret.Get(0).(func(context.Context, int) []byte); ok { + r0 = rf(ctx, movieID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]byte) @@ -301,8 +303,8 @@ func (_m *MovieReaderWriter) GetBackImage(movieID int) ([]byte, error) { } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(movieID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, movieID) } else { r1 = ret.Error(1) } @@ -310,13 +312,13 @@ func (_m *MovieReaderWriter) GetBackImage(movieID int) ([]byte, error) { return r0, r1 } -// GetFrontImage provides a mock function with given fields: movieID -func (_m *MovieReaderWriter) GetFrontImage(movieID int) ([]byte, error) { - ret := _m.Called(movieID) +// GetFrontImage provides a mock function with given fields: ctx, movieID +func (_m *MovieReaderWriter) GetFrontImage(ctx context.Context, movieID int) ([]byte, error) { + ret := _m.Called(ctx, movieID) var r0 []byte - if rf, ok := ret.Get(0).(func(int) []byte); ok { - r0 = rf(movieID) + if rf, ok := ret.Get(0).(func(context.Context, int) []byte); ok { + r0 = rf(ctx, movieID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]byte) @@ -324,8 +326,8 @@ func (_m *MovieReaderWriter) GetFrontImage(movieID int) ([]byte, error) { } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(movieID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, movieID) } else { r1 = ret.Error(1) } @@ -333,13 +335,13 @@ func (_m *MovieReaderWriter) GetFrontImage(movieID int) ([]byte, error) { return r0, r1 } -// Query provides a mock function with given fields: movieFilter, findFilter -func (_m *MovieReaderWriter) Query(movieFilter *models.MovieFilterType, findFilter *models.FindFilterType) ([]*models.Movie, int, error) { - ret := _m.Called(movieFilter, findFilter) +// Query provides a mock function with given fields: ctx, movieFilter, findFilter +func (_m *MovieReaderWriter) Query(ctx context.Context, movieFilter *models.MovieFilterType, findFilter *models.FindFilterType) ([]*models.Movie, int, error) { + ret := _m.Called(ctx, movieFilter, findFilter) var r0 []*models.Movie - if rf, ok := ret.Get(0).(func(*models.MovieFilterType, *models.FindFilterType) []*models.Movie); ok { - r0 = rf(movieFilter, findFilter) + if rf, ok := ret.Get(0).(func(context.Context, *models.MovieFilterType, *models.FindFilterType) []*models.Movie); ok { + r0 = rf(ctx, movieFilter, findFilter) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Movie) @@ -347,15 +349,15 @@ func (_m *MovieReaderWriter) Query(movieFilter *models.MovieFilterType, findFilt } var r1 int - if rf, ok := ret.Get(1).(func(*models.MovieFilterType, *models.FindFilterType) int); ok { - r1 = rf(movieFilter, findFilter) + if rf, ok := ret.Get(1).(func(context.Context, *models.MovieFilterType, *models.FindFilterType) int); ok { + r1 = rf(ctx, movieFilter, findFilter) } else { r1 = ret.Get(1).(int) } var r2 error - if rf, ok := ret.Get(2).(func(*models.MovieFilterType, *models.FindFilterType) error); ok { - r2 = rf(movieFilter, findFilter) + if rf, ok := ret.Get(2).(func(context.Context, *models.MovieFilterType, *models.FindFilterType) error); ok { + r2 = rf(ctx, movieFilter, findFilter) } else { r2 = ret.Error(2) } @@ -363,13 +365,13 @@ func (_m *MovieReaderWriter) Query(movieFilter *models.MovieFilterType, findFilt return r0, r1, r2 } -// Update provides a mock function with given fields: updatedMovie -func (_m *MovieReaderWriter) Update(updatedMovie models.MoviePartial) 
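
An aside on the generated pattern repeated throughout these hunks: every mock body first tries a type assertion such as ret.Get(0).(func(context.Context, int) *models.Movie). That is testify's hook for function-valued returns — Return can be given either a literal value or a function matching the method's new signature, and the generated rf branch calls the function with the live arguments. A minimal sketch, assuming the usual mockery/testify pairing and that models.Movie keeps an integer ID field (imports elided; see the fuller test sketch just below):

    rw := &mocks.MovieReaderWriter{}
    // Passing a func to Return makes the generated rf branch fire,
    // so the mock computes r0 from the actual call arguments.
    rw.On("Find", mock.Anything, mock.AnythingOfType("int")).
        Return(func(ctx context.Context, id int) *models.Movie {
            return &models.Movie{ID: id} // echo the requested ID back
        }, nil)

With a plain value instead of a func, the assertion fails and the fallback ret.Get(0).(*models.Movie) branch is taken.
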
(*models.Movie, error) { - ret := _m.Called(updatedMovie) +// Update provides a mock function with given fields: ctx, updatedMovie +func (_m *MovieReaderWriter) Update(ctx context.Context, updatedMovie models.MoviePartial) (*models.Movie, error) { + ret := _m.Called(ctx, updatedMovie) var r0 *models.Movie - if rf, ok := ret.Get(0).(func(models.MoviePartial) *models.Movie); ok { - r0 = rf(updatedMovie) + if rf, ok := ret.Get(0).(func(context.Context, models.MoviePartial) *models.Movie); ok { + r0 = rf(ctx, updatedMovie) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.Movie) @@ -377,8 +379,8 @@ func (_m *MovieReaderWriter) Update(updatedMovie models.MoviePartial) (*models.M } var r1 error - if rf, ok := ret.Get(1).(func(models.MoviePartial) error); ok { - r1 = rf(updatedMovie) + if rf, ok := ret.Get(1).(func(context.Context, models.MoviePartial) error); ok { + r1 = rf(ctx, updatedMovie) } else { r1 = ret.Error(1) } @@ -386,13 +388,13 @@ func (_m *MovieReaderWriter) Update(updatedMovie models.MoviePartial) (*models.M return r0, r1 } -// UpdateFull provides a mock function with given fields: updatedMovie -func (_m *MovieReaderWriter) UpdateFull(updatedMovie models.Movie) (*models.Movie, error) { - ret := _m.Called(updatedMovie) +// UpdateFull provides a mock function with given fields: ctx, updatedMovie +func (_m *MovieReaderWriter) UpdateFull(ctx context.Context, updatedMovie models.Movie) (*models.Movie, error) { + ret := _m.Called(ctx, updatedMovie) var r0 *models.Movie - if rf, ok := ret.Get(0).(func(models.Movie) *models.Movie); ok { - r0 = rf(updatedMovie) + if rf, ok := ret.Get(0).(func(context.Context, models.Movie) *models.Movie); ok { + r0 = rf(ctx, updatedMovie) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.Movie) @@ -400,8 +402,8 @@ func (_m *MovieReaderWriter) UpdateFull(updatedMovie models.Movie) (*models.Movi } var r1 error - if rf, ok := ret.Get(1).(func(models.Movie) error); ok { - r1 = rf(updatedMovie) + if rf, ok := ret.Get(1).(func(context.Context, models.Movie) error); ok { + r1 = rf(ctx, updatedMovie) } else { r1 = ret.Error(1) } @@ -409,13 +411,13 @@ func (_m *MovieReaderWriter) UpdateFull(updatedMovie models.Movie) (*models.Movi return r0, r1 } -// UpdateImages provides a mock function with given fields: movieID, frontImage, backImage -func (_m *MovieReaderWriter) UpdateImages(movieID int, frontImage []byte, backImage []byte) error { - ret := _m.Called(movieID, frontImage, backImage) +// UpdateImages provides a mock function with given fields: ctx, movieID, frontImage, backImage +func (_m *MovieReaderWriter) UpdateImages(ctx context.Context, movieID int, frontImage []byte, backImage []byte) error { + ret := _m.Called(ctx, movieID, frontImage, backImage) var r0 error - if rf, ok := ret.Get(0).(func(int, []byte, []byte) error); ok { - r0 = rf(movieID, frontImage, backImage) + if rf, ok := ret.Get(0).(func(context.Context, int, []byte, []byte) error); ok { + r0 = rf(ctx, movieID, frontImage, backImage) } else { r0 = ret.Error(0) } diff --git a/pkg/models/mocks/PerformerReaderWriter.go b/pkg/models/mocks/PerformerReaderWriter.go index 485f75170..f3fece8e6 100644 --- a/pkg/models/mocks/PerformerReaderWriter.go +++ b/pkg/models/mocks/PerformerReaderWriter.go @@ -3,6 +3,8 @@ package mocks import ( + context "context" + models "github.com/stashapp/stash/pkg/models" mock "github.com/stretchr/testify/mock" ) @@ -12,13 +14,13 @@ type PerformerReaderWriter struct { mock.Mock } -// All provides a mock function with given fields: -func (_m *PerformerReaderWriter) 
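
For callers, the practical upshot is that every expectation now has to register the context argument first. A sketch of a test against the regenerated mock — the ID value, field name, and test name are illustrative assumptions, not taken from this diff:

    package mocks_test

    import (
        "context"
        "testing"

        "github.com/stashapp/stash/pkg/models"
        "github.com/stashapp/stash/pkg/models/mocks"
        "github.com/stretchr/testify/mock"
        "github.com/stretchr/testify/require"
    )

    func TestFindThreadsContext(t *testing.T) {
        rw := &mocks.MovieReaderWriter{}
        // mock.Anything matches whatever ctx the code under test supplies,
        // keeping expectations stable across the ctx-threading refactor.
        rw.On("Find", mock.Anything, 42).Return(&models.Movie{ID: 42}, nil)

        got, err := rw.Find(context.Background(), 42)
        require.NoError(t, err)
        require.Equal(t, 42, got.ID) // assumes models.Movie exposes ID int
        rw.AssertExpectations(t)
    }
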
All() ([]*models.Performer, error) { - ret := _m.Called() +// All provides a mock function with given fields: ctx +func (_m *PerformerReaderWriter) All(ctx context.Context) ([]*models.Performer, error) { + ret := _m.Called(ctx) var r0 []*models.Performer - if rf, ok := ret.Get(0).(func() []*models.Performer); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) []*models.Performer); ok { + r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Performer) @@ -26,8 +28,8 @@ func (_m *PerformerReaderWriter) All() ([]*models.Performer, error) { } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -35,20 +37,20 @@ func (_m *PerformerReaderWriter) All() ([]*models.Performer, error) { return r0, r1 } -// Count provides a mock function with given fields: -func (_m *PerformerReaderWriter) Count() (int, error) { - ret := _m.Called() +// Count provides a mock function with given fields: ctx +func (_m *PerformerReaderWriter) Count(ctx context.Context) (int, error) { + ret := _m.Called(ctx) var r0 int - if rf, ok := ret.Get(0).(func() int); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) int); ok { + r0 = rf(ctx) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -56,20 +58,20 @@ func (_m *PerformerReaderWriter) Count() (int, error) { return r0, r1 } -// CountByTagID provides a mock function with given fields: tagID -func (_m *PerformerReaderWriter) CountByTagID(tagID int) (int, error) { - ret := _m.Called(tagID) +// CountByTagID provides a mock function with given fields: ctx, tagID +func (_m *PerformerReaderWriter) CountByTagID(ctx context.Context, tagID int) (int, error) { + ret := _m.Called(ctx, tagID) var r0 int - if rf, ok := ret.Get(0).(func(int) int); ok { - r0 = rf(tagID) + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, tagID) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(tagID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, tagID) } else { r1 = ret.Error(1) } @@ -77,13 +79,13 @@ func (_m *PerformerReaderWriter) CountByTagID(tagID int) (int, error) { return r0, r1 } -// Create provides a mock function with given fields: newPerformer -func (_m *PerformerReaderWriter) Create(newPerformer models.Performer) (*models.Performer, error) { - ret := _m.Called(newPerformer) +// Create provides a mock function with given fields: ctx, newPerformer +func (_m *PerformerReaderWriter) Create(ctx context.Context, newPerformer models.Performer) (*models.Performer, error) { + ret := _m.Called(ctx, newPerformer) var r0 *models.Performer - if rf, ok := ret.Get(0).(func(models.Performer) *models.Performer); ok { - r0 = rf(newPerformer) + if rf, ok := ret.Get(0).(func(context.Context, models.Performer) *models.Performer); ok { + r0 = rf(ctx, newPerformer) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.Performer) @@ -91,8 +93,8 @@ func (_m *PerformerReaderWriter) Create(newPerformer models.Performer) (*models. 
} var r1 error - if rf, ok := ret.Get(1).(func(models.Performer) error); ok { - r1 = rf(newPerformer) + if rf, ok := ret.Get(1).(func(context.Context, models.Performer) error); ok { + r1 = rf(ctx, newPerformer) } else { r1 = ret.Error(1) } @@ -100,13 +102,13 @@ func (_m *PerformerReaderWriter) Create(newPerformer models.Performer) (*models. return r0, r1 } -// Destroy provides a mock function with given fields: id -func (_m *PerformerReaderWriter) Destroy(id int) error { - ret := _m.Called(id) +// Destroy provides a mock function with given fields: ctx, id +func (_m *PerformerReaderWriter) Destroy(ctx context.Context, id int) error { + ret := _m.Called(ctx, id) var r0 error - if rf, ok := ret.Get(0).(func(int) error); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(context.Context, int) error); ok { + r0 = rf(ctx, id) } else { r0 = ret.Error(0) } @@ -114,13 +116,13 @@ func (_m *PerformerReaderWriter) Destroy(id int) error { return r0 } -// DestroyImage provides a mock function with given fields: performerID -func (_m *PerformerReaderWriter) DestroyImage(performerID int) error { - ret := _m.Called(performerID) +// DestroyImage provides a mock function with given fields: ctx, performerID +func (_m *PerformerReaderWriter) DestroyImage(ctx context.Context, performerID int) error { + ret := _m.Called(ctx, performerID) var r0 error - if rf, ok := ret.Get(0).(func(int) error); ok { - r0 = rf(performerID) + if rf, ok := ret.Get(0).(func(context.Context, int) error); ok { + r0 = rf(ctx, performerID) } else { r0 = ret.Error(0) } @@ -128,13 +130,13 @@ func (_m *PerformerReaderWriter) DestroyImage(performerID int) error { return r0 } -// Find provides a mock function with given fields: id -func (_m *PerformerReaderWriter) Find(id int) (*models.Performer, error) { - ret := _m.Called(id) +// Find provides a mock function with given fields: ctx, id +func (_m *PerformerReaderWriter) Find(ctx context.Context, id int) (*models.Performer, error) { + ret := _m.Called(ctx, id) var r0 *models.Performer - if rf, ok := ret.Get(0).(func(int) *models.Performer); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(context.Context, int) *models.Performer); ok { + r0 = rf(ctx, id) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.Performer) @@ -142,8 +144,8 @@ func (_m *PerformerReaderWriter) Find(id int) (*models.Performer, error) { } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(id) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) } else { r1 = ret.Error(1) } @@ -151,13 +153,13 @@ func (_m *PerformerReaderWriter) Find(id int) (*models.Performer, error) { return r0, r1 } -// FindByGalleryID provides a mock function with given fields: galleryID -func (_m *PerformerReaderWriter) FindByGalleryID(galleryID int) ([]*models.Performer, error) { - ret := _m.Called(galleryID) +// FindByGalleryID provides a mock function with given fields: ctx, galleryID +func (_m *PerformerReaderWriter) FindByGalleryID(ctx context.Context, galleryID int) ([]*models.Performer, error) { + ret := _m.Called(ctx, galleryID) var r0 []*models.Performer - if rf, ok := ret.Get(0).(func(int) []*models.Performer); ok { - r0 = rf(galleryID) + if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Performer); ok { + r0 = rf(ctx, galleryID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Performer) @@ -165,8 +167,8 @@ func (_m *PerformerReaderWriter) FindByGalleryID(galleryID int) ([]*models.Perfo } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 
= rf(galleryID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, galleryID) } else { r1 = ret.Error(1) } @@ -174,13 +176,13 @@ func (_m *PerformerReaderWriter) FindByGalleryID(galleryID int) ([]*models.Perfo return r0, r1 } -// FindByImageID provides a mock function with given fields: imageID -func (_m *PerformerReaderWriter) FindByImageID(imageID int) ([]*models.Performer, error) { - ret := _m.Called(imageID) +// FindByImageID provides a mock function with given fields: ctx, imageID +func (_m *PerformerReaderWriter) FindByImageID(ctx context.Context, imageID int) ([]*models.Performer, error) { + ret := _m.Called(ctx, imageID) var r0 []*models.Performer - if rf, ok := ret.Get(0).(func(int) []*models.Performer); ok { - r0 = rf(imageID) + if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Performer); ok { + r0 = rf(ctx, imageID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Performer) @@ -188,8 +190,8 @@ func (_m *PerformerReaderWriter) FindByImageID(imageID int) ([]*models.Performer } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(imageID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, imageID) } else { r1 = ret.Error(1) } @@ -197,13 +199,13 @@ func (_m *PerformerReaderWriter) FindByImageID(imageID int) ([]*models.Performer return r0, r1 } -// FindByNames provides a mock function with given fields: names, nocase -func (_m *PerformerReaderWriter) FindByNames(names []string, nocase bool) ([]*models.Performer, error) { - ret := _m.Called(names, nocase) +// FindByNames provides a mock function with given fields: ctx, names, nocase +func (_m *PerformerReaderWriter) FindByNames(ctx context.Context, names []string, nocase bool) ([]*models.Performer, error) { + ret := _m.Called(ctx, names, nocase) var r0 []*models.Performer - if rf, ok := ret.Get(0).(func([]string, bool) []*models.Performer); ok { - r0 = rf(names, nocase) + if rf, ok := ret.Get(0).(func(context.Context, []string, bool) []*models.Performer); ok { + r0 = rf(ctx, names, nocase) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Performer) @@ -211,8 +213,8 @@ func (_m *PerformerReaderWriter) FindByNames(names []string, nocase bool) ([]*mo } var r1 error - if rf, ok := ret.Get(1).(func([]string, bool) error); ok { - r1 = rf(names, nocase) + if rf, ok := ret.Get(1).(func(context.Context, []string, bool) error); ok { + r1 = rf(ctx, names, nocase) } else { r1 = ret.Error(1) } @@ -220,13 +222,13 @@ func (_m *PerformerReaderWriter) FindByNames(names []string, nocase bool) ([]*mo return r0, r1 } -// FindBySceneID provides a mock function with given fields: sceneID -func (_m *PerformerReaderWriter) FindBySceneID(sceneID int) ([]*models.Performer, error) { - ret := _m.Called(sceneID) +// FindBySceneID provides a mock function with given fields: ctx, sceneID +func (_m *PerformerReaderWriter) FindBySceneID(ctx context.Context, sceneID int) ([]*models.Performer, error) { + ret := _m.Called(ctx, sceneID) var r0 []*models.Performer - if rf, ok := ret.Get(0).(func(int) []*models.Performer); ok { - r0 = rf(sceneID) + if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Performer); ok { + r0 = rf(ctx, sceneID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Performer) @@ -234,8 +236,8 @@ func (_m *PerformerReaderWriter) FindBySceneID(sceneID int) ([]*models.Performer } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(sceneID) + if rf, ok := ret.Get(1).(func(context.Context, int) 
error); ok { + r1 = rf(ctx, sceneID) } else { r1 = ret.Error(1) } @@ -243,13 +245,13 @@ func (_m *PerformerReaderWriter) FindBySceneID(sceneID int) ([]*models.Performer return r0, r1 } -// FindByStashID provides a mock function with given fields: stashID -func (_m *PerformerReaderWriter) FindByStashID(stashID models.StashID) ([]*models.Performer, error) { - ret := _m.Called(stashID) +// FindByStashID provides a mock function with given fields: ctx, stashID +func (_m *PerformerReaderWriter) FindByStashID(ctx context.Context, stashID models.StashID) ([]*models.Performer, error) { + ret := _m.Called(ctx, stashID) var r0 []*models.Performer - if rf, ok := ret.Get(0).(func(models.StashID) []*models.Performer); ok { - r0 = rf(stashID) + if rf, ok := ret.Get(0).(func(context.Context, models.StashID) []*models.Performer); ok { + r0 = rf(ctx, stashID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Performer) @@ -257,8 +259,8 @@ func (_m *PerformerReaderWriter) FindByStashID(stashID models.StashID) ([]*model } var r1 error - if rf, ok := ret.Get(1).(func(models.StashID) error); ok { - r1 = rf(stashID) + if rf, ok := ret.Get(1).(func(context.Context, models.StashID) error); ok { + r1 = rf(ctx, stashID) } else { r1 = ret.Error(1) } @@ -266,13 +268,13 @@ func (_m *PerformerReaderWriter) FindByStashID(stashID models.StashID) ([]*model return r0, r1 } -// FindByStashIDStatus provides a mock function with given fields: hasStashID, stashboxEndpoint -func (_m *PerformerReaderWriter) FindByStashIDStatus(hasStashID bool, stashboxEndpoint string) ([]*models.Performer, error) { - ret := _m.Called(hasStashID, stashboxEndpoint) +// FindByStashIDStatus provides a mock function with given fields: ctx, hasStashID, stashboxEndpoint +func (_m *PerformerReaderWriter) FindByStashIDStatus(ctx context.Context, hasStashID bool, stashboxEndpoint string) ([]*models.Performer, error) { + ret := _m.Called(ctx, hasStashID, stashboxEndpoint) var r0 []*models.Performer - if rf, ok := ret.Get(0).(func(bool, string) []*models.Performer); ok { - r0 = rf(hasStashID, stashboxEndpoint) + if rf, ok := ret.Get(0).(func(context.Context, bool, string) []*models.Performer); ok { + r0 = rf(ctx, hasStashID, stashboxEndpoint) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Performer) @@ -280,8 +282,8 @@ func (_m *PerformerReaderWriter) FindByStashIDStatus(hasStashID bool, stashboxEn } var r1 error - if rf, ok := ret.Get(1).(func(bool, string) error); ok { - r1 = rf(hasStashID, stashboxEndpoint) + if rf, ok := ret.Get(1).(func(context.Context, bool, string) error); ok { + r1 = rf(ctx, hasStashID, stashboxEndpoint) } else { r1 = ret.Error(1) } @@ -289,13 +291,13 @@ func (_m *PerformerReaderWriter) FindByStashIDStatus(hasStashID bool, stashboxEn return r0, r1 } -// FindMany provides a mock function with given fields: ids -func (_m *PerformerReaderWriter) FindMany(ids []int) ([]*models.Performer, error) { - ret := _m.Called(ids) +// FindMany provides a mock function with given fields: ctx, ids +func (_m *PerformerReaderWriter) FindMany(ctx context.Context, ids []int) ([]*models.Performer, error) { + ret := _m.Called(ctx, ids) var r0 []*models.Performer - if rf, ok := ret.Get(0).(func([]int) []*models.Performer); ok { - r0 = rf(ids) + if rf, ok := ret.Get(0).(func(context.Context, []int) []*models.Performer); ok { + r0 = rf(ctx, ids) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Performer) @@ -303,8 +305,8 @@ func (_m *PerformerReaderWriter) FindMany(ids []int) ([]*models.Performer, error } var r1 error - if rf, 
ok := ret.Get(1).(func([]int) error); ok { - r1 = rf(ids) + if rf, ok := ret.Get(1).(func(context.Context, []int) error); ok { + r1 = rf(ctx, ids) } else { r1 = ret.Error(1) } @@ -312,13 +314,13 @@ func (_m *PerformerReaderWriter) FindMany(ids []int) ([]*models.Performer, error return r0, r1 } -// FindNamesBySceneID provides a mock function with given fields: sceneID -func (_m *PerformerReaderWriter) FindNamesBySceneID(sceneID int) ([]*models.Performer, error) { - ret := _m.Called(sceneID) +// FindNamesBySceneID provides a mock function with given fields: ctx, sceneID +func (_m *PerformerReaderWriter) FindNamesBySceneID(ctx context.Context, sceneID int) ([]*models.Performer, error) { + ret := _m.Called(ctx, sceneID) var r0 []*models.Performer - if rf, ok := ret.Get(0).(func(int) []*models.Performer); ok { - r0 = rf(sceneID) + if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Performer); ok { + r0 = rf(ctx, sceneID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Performer) @@ -326,8 +328,8 @@ func (_m *PerformerReaderWriter) FindNamesBySceneID(sceneID int) ([]*models.Perf } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(sceneID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, sceneID) } else { r1 = ret.Error(1) } @@ -335,13 +337,13 @@ func (_m *PerformerReaderWriter) FindNamesBySceneID(sceneID int) ([]*models.Perf return r0, r1 } -// GetImage provides a mock function with given fields: performerID -func (_m *PerformerReaderWriter) GetImage(performerID int) ([]byte, error) { - ret := _m.Called(performerID) +// GetImage provides a mock function with given fields: ctx, performerID +func (_m *PerformerReaderWriter) GetImage(ctx context.Context, performerID int) ([]byte, error) { + ret := _m.Called(ctx, performerID) var r0 []byte - if rf, ok := ret.Get(0).(func(int) []byte); ok { - r0 = rf(performerID) + if rf, ok := ret.Get(0).(func(context.Context, int) []byte); ok { + r0 = rf(ctx, performerID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]byte) @@ -349,8 +351,8 @@ func (_m *PerformerReaderWriter) GetImage(performerID int) ([]byte, error) { } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(performerID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, performerID) } else { r1 = ret.Error(1) } @@ -358,22 +360,22 @@ func (_m *PerformerReaderWriter) GetImage(performerID int) ([]byte, error) { return r0, r1 } -// GetStashIDs provides a mock function with given fields: performerID -func (_m *PerformerReaderWriter) GetStashIDs(performerID int) ([]*models.StashID, error) { - ret := _m.Called(performerID) +// GetStashIDs provides a mock function with given fields: ctx, relatedID +func (_m *PerformerReaderWriter) GetStashIDs(ctx context.Context, relatedID int) ([]models.StashID, error) { + ret := _m.Called(ctx, relatedID) - var r0 []*models.StashID - if rf, ok := ret.Get(0).(func(int) []*models.StashID); ok { - r0 = rf(performerID) + var r0 []models.StashID + if rf, ok := ret.Get(0).(func(context.Context, int) []models.StashID); ok { + r0 = rf(ctx, relatedID) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*models.StashID) + r0 = ret.Get(0).([]models.StashID) } } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(performerID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, relatedID) } else { r1 = ret.Error(1) } @@ -381,13 +383,13 @@ func (_m *PerformerReaderWriter) GetStashIDs(performerID int) 
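
GetStashIDs deserves a closer look: it is the one hunk in this file that changes more than the parameter list. The argument is renamed performerID → relatedID and the result moves from []*models.StashID to the value slice []models.StashID, so the fallback assertion becomes ret.Get(0).([]models.StashID). Expectations that still return a pointer slice will panic in that assertion at test time. A before/after sketch (imports elided as in the earlier test sketch; field values invented, assuming models.StashID keeps its StashID and Endpoint fields):

    // before the refactor: no ctx, pointer slice
    rw.On("GetStashIDs", 5).
        Return([]*models.StashID{{StashID: "abc", Endpoint: "https://example.org"}}, nil)

    // after: ctx first, value slice
    rw.On("GetStashIDs", mock.Anything, 5).
        Return([]models.StashID{{StashID: "abc", Endpoint: "https://example.org"}}, nil)
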
([]*models.StashID return r0, r1 } -// GetTagIDs provides a mock function with given fields: performerID -func (_m *PerformerReaderWriter) GetTagIDs(performerID int) ([]int, error) { - ret := _m.Called(performerID) +// GetTagIDs provides a mock function with given fields: ctx, performerID +func (_m *PerformerReaderWriter) GetTagIDs(ctx context.Context, performerID int) ([]int, error) { + ret := _m.Called(ctx, performerID) var r0 []int - if rf, ok := ret.Get(0).(func(int) []int); ok { - r0 = rf(performerID) + if rf, ok := ret.Get(0).(func(context.Context, int) []int); ok { + r0 = rf(ctx, performerID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]int) @@ -395,8 +397,8 @@ func (_m *PerformerReaderWriter) GetTagIDs(performerID int) ([]int, error) { } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(performerID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, performerID) } else { r1 = ret.Error(1) } @@ -404,13 +406,13 @@ func (_m *PerformerReaderWriter) GetTagIDs(performerID int) ([]int, error) { return r0, r1 } -// Query provides a mock function with given fields: performerFilter, findFilter -func (_m *PerformerReaderWriter) Query(performerFilter *models.PerformerFilterType, findFilter *models.FindFilterType) ([]*models.Performer, int, error) { - ret := _m.Called(performerFilter, findFilter) +// Query provides a mock function with given fields: ctx, performerFilter, findFilter +func (_m *PerformerReaderWriter) Query(ctx context.Context, performerFilter *models.PerformerFilterType, findFilter *models.FindFilterType) ([]*models.Performer, int, error) { + ret := _m.Called(ctx, performerFilter, findFilter) var r0 []*models.Performer - if rf, ok := ret.Get(0).(func(*models.PerformerFilterType, *models.FindFilterType) []*models.Performer); ok { - r0 = rf(performerFilter, findFilter) + if rf, ok := ret.Get(0).(func(context.Context, *models.PerformerFilterType, *models.FindFilterType) []*models.Performer); ok { + r0 = rf(ctx, performerFilter, findFilter) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Performer) @@ -418,15 +420,15 @@ func (_m *PerformerReaderWriter) Query(performerFilter *models.PerformerFilterTy } var r1 int - if rf, ok := ret.Get(1).(func(*models.PerformerFilterType, *models.FindFilterType) int); ok { - r1 = rf(performerFilter, findFilter) + if rf, ok := ret.Get(1).(func(context.Context, *models.PerformerFilterType, *models.FindFilterType) int); ok { + r1 = rf(ctx, performerFilter, findFilter) } else { r1 = ret.Get(1).(int) } var r2 error - if rf, ok := ret.Get(2).(func(*models.PerformerFilterType, *models.FindFilterType) error); ok { - r2 = rf(performerFilter, findFilter) + if rf, ok := ret.Get(2).(func(context.Context, *models.PerformerFilterType, *models.FindFilterType) error); ok { + r2 = rf(ctx, performerFilter, findFilter) } else { r2 = ret.Error(2) } @@ -434,13 +436,13 @@ func (_m *PerformerReaderWriter) Query(performerFilter *models.PerformerFilterTy return r0, r1, r2 } -// QueryForAutoTag provides a mock function with given fields: words -func (_m *PerformerReaderWriter) QueryForAutoTag(words []string) ([]*models.Performer, error) { - ret := _m.Called(words) +// QueryForAutoTag provides a mock function with given fields: ctx, words +func (_m *PerformerReaderWriter) QueryForAutoTag(ctx context.Context, words []string) ([]*models.Performer, error) { + ret := _m.Called(ctx, words) var r0 []*models.Performer - if rf, ok := ret.Get(0).(func([]string) []*models.Performer); ok { - r0 = rf(words) + if rf, ok 
:= ret.Get(0).(func(context.Context, []string) []*models.Performer); ok { + r0 = rf(ctx, words) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Performer) @@ -448,8 +450,8 @@ func (_m *PerformerReaderWriter) QueryForAutoTag(words []string) ([]*models.Perf } var r1 error - if rf, ok := ret.Get(1).(func([]string) error); ok { - r1 = rf(words) + if rf, ok := ret.Get(1).(func(context.Context, []string) error); ok { + r1 = rf(ctx, words) } else { r1 = ret.Error(1) } @@ -457,13 +459,13 @@ func (_m *PerformerReaderWriter) QueryForAutoTag(words []string) ([]*models.Perf return r0, r1 } -// Update provides a mock function with given fields: updatedPerformer -func (_m *PerformerReaderWriter) Update(updatedPerformer models.PerformerPartial) (*models.Performer, error) { - ret := _m.Called(updatedPerformer) +// Update provides a mock function with given fields: ctx, updatedPerformer +func (_m *PerformerReaderWriter) Update(ctx context.Context, updatedPerformer models.PerformerPartial) (*models.Performer, error) { + ret := _m.Called(ctx, updatedPerformer) var r0 *models.Performer - if rf, ok := ret.Get(0).(func(models.PerformerPartial) *models.Performer); ok { - r0 = rf(updatedPerformer) + if rf, ok := ret.Get(0).(func(context.Context, models.PerformerPartial) *models.Performer); ok { + r0 = rf(ctx, updatedPerformer) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.Performer) @@ -471,8 +473,8 @@ func (_m *PerformerReaderWriter) Update(updatedPerformer models.PerformerPartial } var r1 error - if rf, ok := ret.Get(1).(func(models.PerformerPartial) error); ok { - r1 = rf(updatedPerformer) + if rf, ok := ret.Get(1).(func(context.Context, models.PerformerPartial) error); ok { + r1 = rf(ctx, updatedPerformer) } else { r1 = ret.Error(1) } @@ -480,13 +482,13 @@ func (_m *PerformerReaderWriter) Update(updatedPerformer models.PerformerPartial return r0, r1 } -// UpdateFull provides a mock function with given fields: updatedPerformer -func (_m *PerformerReaderWriter) UpdateFull(updatedPerformer models.Performer) (*models.Performer, error) { - ret := _m.Called(updatedPerformer) +// UpdateFull provides a mock function with given fields: ctx, updatedPerformer +func (_m *PerformerReaderWriter) UpdateFull(ctx context.Context, updatedPerformer models.Performer) (*models.Performer, error) { + ret := _m.Called(ctx, updatedPerformer) var r0 *models.Performer - if rf, ok := ret.Get(0).(func(models.Performer) *models.Performer); ok { - r0 = rf(updatedPerformer) + if rf, ok := ret.Get(0).(func(context.Context, models.Performer) *models.Performer); ok { + r0 = rf(ctx, updatedPerformer) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.Performer) @@ -494,8 +496,8 @@ func (_m *PerformerReaderWriter) UpdateFull(updatedPerformer models.Performer) ( } var r1 error - if rf, ok := ret.Get(1).(func(models.Performer) error); ok { - r1 = rf(updatedPerformer) + if rf, ok := ret.Get(1).(func(context.Context, models.Performer) error); ok { + r1 = rf(ctx, updatedPerformer) } else { r1 = ret.Error(1) } @@ -503,13 +505,13 @@ func (_m *PerformerReaderWriter) UpdateFull(updatedPerformer models.Performer) ( return r0, r1 } -// UpdateImage provides a mock function with given fields: performerID, image -func (_m *PerformerReaderWriter) UpdateImage(performerID int, image []byte) error { - ret := _m.Called(performerID, image) +// UpdateImage provides a mock function with given fields: ctx, performerID, image +func (_m *PerformerReaderWriter) UpdateImage(ctx context.Context, performerID int, image []byte) error { + ret := 
_m.Called(ctx, performerID, image) var r0 error - if rf, ok := ret.Get(0).(func(int, []byte) error); ok { - r0 = rf(performerID, image) + if rf, ok := ret.Get(0).(func(context.Context, int, []byte) error); ok { + r0 = rf(ctx, performerID, image) } else { r0 = ret.Error(0) } @@ -517,13 +519,13 @@ func (_m *PerformerReaderWriter) UpdateImage(performerID int, image []byte) erro return r0 } -// UpdateStashIDs provides a mock function with given fields: performerID, stashIDs -func (_m *PerformerReaderWriter) UpdateStashIDs(performerID int, stashIDs []models.StashID) error { - ret := _m.Called(performerID, stashIDs) +// UpdateStashIDs provides a mock function with given fields: ctx, performerID, stashIDs +func (_m *PerformerReaderWriter) UpdateStashIDs(ctx context.Context, performerID int, stashIDs []models.StashID) error { + ret := _m.Called(ctx, performerID, stashIDs) var r0 error - if rf, ok := ret.Get(0).(func(int, []models.StashID) error); ok { - r0 = rf(performerID, stashIDs) + if rf, ok := ret.Get(0).(func(context.Context, int, []models.StashID) error); ok { + r0 = rf(ctx, performerID, stashIDs) } else { r0 = ret.Error(0) } @@ -531,13 +533,13 @@ func (_m *PerformerReaderWriter) UpdateStashIDs(performerID int, stashIDs []mode return r0 } -// UpdateTags provides a mock function with given fields: performerID, tagIDs -func (_m *PerformerReaderWriter) UpdateTags(performerID int, tagIDs []int) error { - ret := _m.Called(performerID, tagIDs) +// UpdateTags provides a mock function with given fields: ctx, performerID, tagIDs +func (_m *PerformerReaderWriter) UpdateTags(ctx context.Context, performerID int, tagIDs []int) error { + ret := _m.Called(ctx, performerID, tagIDs) var r0 error - if rf, ok := ret.Get(0).(func(int, []int) error); ok { - r0 = rf(performerID, tagIDs) + if rf, ok := ret.Get(0).(func(context.Context, int, []int) error); ok { + r0 = rf(ctx, performerID, tagIDs) } else { r0 = ret.Error(0) } diff --git a/pkg/models/mocks/SavedFilterReaderWriter.go b/pkg/models/mocks/SavedFilterReaderWriter.go index 952497be2..8f9e6e553 100644 --- a/pkg/models/mocks/SavedFilterReaderWriter.go +++ b/pkg/models/mocks/SavedFilterReaderWriter.go @@ -3,6 +3,8 @@ package mocks import ( + context "context" + models "github.com/stashapp/stash/pkg/models" mock "github.com/stretchr/testify/mock" ) @@ -12,13 +14,13 @@ type SavedFilterReaderWriter struct { mock.Mock } -// All provides a mock function with given fields: -func (_m *SavedFilterReaderWriter) All() ([]*models.SavedFilter, error) { - ret := _m.Called() +// All provides a mock function with given fields: ctx +func (_m *SavedFilterReaderWriter) All(ctx context.Context) ([]*models.SavedFilter, error) { + ret := _m.Called(ctx) var r0 []*models.SavedFilter - if rf, ok := ret.Get(0).(func() []*models.SavedFilter); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) []*models.SavedFilter); ok { + r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.SavedFilter) @@ -26,8 +28,8 @@ func (_m *SavedFilterReaderWriter) All() ([]*models.SavedFilter, error) { } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -35,13 +37,13 @@ func (_m *SavedFilterReaderWriter) All() ([]*models.SavedFilter, error) { return r0, r1 } -// Create provides a mock function with given fields: obj -func (_m *SavedFilterReaderWriter) Create(obj models.SavedFilter) (*models.SavedFilter, error) { - ret := _m.Called(obj) +// 
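
As the "autogenerated mock type" headers say, none of these files are meant to be edited by hand: they are mockery output, and the sweep of changes above is simply what regenerating against the context-aware interfaces produces. The exact invocation is not part of this diff, but it would be something along the lines of mockery --name=SavedFilterReaderWriter --dir=pkg/models --output=pkg/models/mocks; interface changes should go through the source interface plus a regeneration rather than a manual patch here.
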
Create provides a mock function with given fields: ctx, obj +func (_m *SavedFilterReaderWriter) Create(ctx context.Context, obj models.SavedFilter) (*models.SavedFilter, error) { + ret := _m.Called(ctx, obj) var r0 *models.SavedFilter - if rf, ok := ret.Get(0).(func(models.SavedFilter) *models.SavedFilter); ok { - r0 = rf(obj) + if rf, ok := ret.Get(0).(func(context.Context, models.SavedFilter) *models.SavedFilter); ok { + r0 = rf(ctx, obj) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.SavedFilter) @@ -49,8 +51,8 @@ func (_m *SavedFilterReaderWriter) Create(obj models.SavedFilter) (*models.Saved } var r1 error - if rf, ok := ret.Get(1).(func(models.SavedFilter) error); ok { - r1 = rf(obj) + if rf, ok := ret.Get(1).(func(context.Context, models.SavedFilter) error); ok { + r1 = rf(ctx, obj) } else { r1 = ret.Error(1) } @@ -58,13 +60,13 @@ func (_m *SavedFilterReaderWriter) Create(obj models.SavedFilter) (*models.Saved return r0, r1 } -// Destroy provides a mock function with given fields: id -func (_m *SavedFilterReaderWriter) Destroy(id int) error { - ret := _m.Called(id) +// Destroy provides a mock function with given fields: ctx, id +func (_m *SavedFilterReaderWriter) Destroy(ctx context.Context, id int) error { + ret := _m.Called(ctx, id) var r0 error - if rf, ok := ret.Get(0).(func(int) error); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(context.Context, int) error); ok { + r0 = rf(ctx, id) } else { r0 = ret.Error(0) } @@ -72,13 +74,13 @@ func (_m *SavedFilterReaderWriter) Destroy(id int) error { return r0 } -// Find provides a mock function with given fields: id -func (_m *SavedFilterReaderWriter) Find(id int) (*models.SavedFilter, error) { - ret := _m.Called(id) +// Find provides a mock function with given fields: ctx, id +func (_m *SavedFilterReaderWriter) Find(ctx context.Context, id int) (*models.SavedFilter, error) { + ret := _m.Called(ctx, id) var r0 *models.SavedFilter - if rf, ok := ret.Get(0).(func(int) *models.SavedFilter); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(context.Context, int) *models.SavedFilter); ok { + r0 = rf(ctx, id) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.SavedFilter) @@ -86,8 +88,8 @@ func (_m *SavedFilterReaderWriter) Find(id int) (*models.SavedFilter, error) { } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(id) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) } else { r1 = ret.Error(1) } @@ -95,13 +97,13 @@ func (_m *SavedFilterReaderWriter) Find(id int) (*models.SavedFilter, error) { return r0, r1 } -// FindByMode provides a mock function with given fields: mode -func (_m *SavedFilterReaderWriter) FindByMode(mode models.FilterMode) ([]*models.SavedFilter, error) { - ret := _m.Called(mode) +// FindByMode provides a mock function with given fields: ctx, mode +func (_m *SavedFilterReaderWriter) FindByMode(ctx context.Context, mode models.FilterMode) ([]*models.SavedFilter, error) { + ret := _m.Called(ctx, mode) var r0 []*models.SavedFilter - if rf, ok := ret.Get(0).(func(models.FilterMode) []*models.SavedFilter); ok { - r0 = rf(mode) + if rf, ok := ret.Get(0).(func(context.Context, models.FilterMode) []*models.SavedFilter); ok { + r0 = rf(ctx, mode) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.SavedFilter) @@ -109,8 +111,8 @@ func (_m *SavedFilterReaderWriter) FindByMode(mode models.FilterMode) ([]*models } var r1 error - if rf, ok := ret.Get(1).(func(models.FilterMode) error); ok { - r1 = rf(mode) + if rf, ok := 
ret.Get(1).(func(context.Context, models.FilterMode) error); ok { + r1 = rf(ctx, mode) } else { r1 = ret.Error(1) } @@ -118,13 +120,13 @@ func (_m *SavedFilterReaderWriter) FindByMode(mode models.FilterMode) ([]*models return r0, r1 } -// FindDefault provides a mock function with given fields: mode -func (_m *SavedFilterReaderWriter) FindDefault(mode models.FilterMode) (*models.SavedFilter, error) { - ret := _m.Called(mode) +// FindDefault provides a mock function with given fields: ctx, mode +func (_m *SavedFilterReaderWriter) FindDefault(ctx context.Context, mode models.FilterMode) (*models.SavedFilter, error) { + ret := _m.Called(ctx, mode) var r0 *models.SavedFilter - if rf, ok := ret.Get(0).(func(models.FilterMode) *models.SavedFilter); ok { - r0 = rf(mode) + if rf, ok := ret.Get(0).(func(context.Context, models.FilterMode) *models.SavedFilter); ok { + r0 = rf(ctx, mode) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.SavedFilter) @@ -132,8 +134,8 @@ func (_m *SavedFilterReaderWriter) FindDefault(mode models.FilterMode) (*models. } var r1 error - if rf, ok := ret.Get(1).(func(models.FilterMode) error); ok { - r1 = rf(mode) + if rf, ok := ret.Get(1).(func(context.Context, models.FilterMode) error); ok { + r1 = rf(ctx, mode) } else { r1 = ret.Error(1) } @@ -141,13 +143,13 @@ func (_m *SavedFilterReaderWriter) FindDefault(mode models.FilterMode) (*models. return r0, r1 } -// FindMany provides a mock function with given fields: ids, ignoreNotFound -func (_m *SavedFilterReaderWriter) FindMany(ids []int, ignoreNotFound bool) ([]*models.SavedFilter, error) { - ret := _m.Called(ids, ignoreNotFound) +// FindMany provides a mock function with given fields: ctx, ids, ignoreNotFound +func (_m *SavedFilterReaderWriter) FindMany(ctx context.Context, ids []int, ignoreNotFound bool) ([]*models.SavedFilter, error) { + ret := _m.Called(ctx, ids, ignoreNotFound) var r0 []*models.SavedFilter - if rf, ok := ret.Get(0).(func([]int, bool) []*models.SavedFilter); ok { - r0 = rf(ids, ignoreNotFound) + if rf, ok := ret.Get(0).(func(context.Context, []int, bool) []*models.SavedFilter); ok { + r0 = rf(ctx, ids, ignoreNotFound) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.SavedFilter) @@ -155,8 +157,8 @@ func (_m *SavedFilterReaderWriter) FindMany(ids []int, ignoreNotFound bool) ([]* } var r1 error - if rf, ok := ret.Get(1).(func([]int, bool) error); ok { - r1 = rf(ids, ignoreNotFound) + if rf, ok := ret.Get(1).(func(context.Context, []int, bool) error); ok { + r1 = rf(ctx, ids, ignoreNotFound) } else { r1 = ret.Error(1) } @@ -164,13 +166,13 @@ func (_m *SavedFilterReaderWriter) FindMany(ids []int, ignoreNotFound bool) ([]* return r0, r1 } -// SetDefault provides a mock function with given fields: obj -func (_m *SavedFilterReaderWriter) SetDefault(obj models.SavedFilter) (*models.SavedFilter, error) { - ret := _m.Called(obj) +// SetDefault provides a mock function with given fields: ctx, obj +func (_m *SavedFilterReaderWriter) SetDefault(ctx context.Context, obj models.SavedFilter) (*models.SavedFilter, error) { + ret := _m.Called(ctx, obj) var r0 *models.SavedFilter - if rf, ok := ret.Get(0).(func(models.SavedFilter) *models.SavedFilter); ok { - r0 = rf(obj) + if rf, ok := ret.Get(0).(func(context.Context, models.SavedFilter) *models.SavedFilter); ok { + r0 = rf(ctx, obj) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.SavedFilter) @@ -178,8 +180,8 @@ func (_m *SavedFilterReaderWriter) SetDefault(obj models.SavedFilter) (*models.S } var r1 error - if rf, ok := 
ret.Get(1).(func(models.SavedFilter) error); ok { - r1 = rf(obj) + if rf, ok := ret.Get(1).(func(context.Context, models.SavedFilter) error); ok { + r1 = rf(ctx, obj) } else { r1 = ret.Error(1) } @@ -187,13 +189,13 @@ func (_m *SavedFilterReaderWriter) SetDefault(obj models.SavedFilter) (*models.S return r0, r1 } -// Update provides a mock function with given fields: obj -func (_m *SavedFilterReaderWriter) Update(obj models.SavedFilter) (*models.SavedFilter, error) { - ret := _m.Called(obj) +// Update provides a mock function with given fields: ctx, obj +func (_m *SavedFilterReaderWriter) Update(ctx context.Context, obj models.SavedFilter) (*models.SavedFilter, error) { + ret := _m.Called(ctx, obj) var r0 *models.SavedFilter - if rf, ok := ret.Get(0).(func(models.SavedFilter) *models.SavedFilter); ok { - r0 = rf(obj) + if rf, ok := ret.Get(0).(func(context.Context, models.SavedFilter) *models.SavedFilter); ok { + r0 = rf(ctx, obj) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.SavedFilter) @@ -201,8 +203,8 @@ func (_m *SavedFilterReaderWriter) Update(obj models.SavedFilter) (*models.Saved } var r1 error - if rf, ok := ret.Get(1).(func(models.SavedFilter) error); ok { - r1 = rf(obj) + if rf, ok := ret.Get(1).(func(context.Context, models.SavedFilter) error); ok { + r1 = rf(ctx, obj) } else { r1 = ret.Error(1) } diff --git a/pkg/models/mocks/SceneMarkerReaderWriter.go b/pkg/models/mocks/SceneMarkerReaderWriter.go index 2e6fea3a0..695a54391 100644 --- a/pkg/models/mocks/SceneMarkerReaderWriter.go +++ b/pkg/models/mocks/SceneMarkerReaderWriter.go @@ -3,6 +3,8 @@ package mocks import ( + context "context" + models "github.com/stashapp/stash/pkg/models" mock "github.com/stretchr/testify/mock" ) @@ -12,20 +14,20 @@ type SceneMarkerReaderWriter struct { mock.Mock } -// CountByTagID provides a mock function with given fields: tagID -func (_m *SceneMarkerReaderWriter) CountByTagID(tagID int) (int, error) { - ret := _m.Called(tagID) +// CountByTagID provides a mock function with given fields: ctx, tagID +func (_m *SceneMarkerReaderWriter) CountByTagID(ctx context.Context, tagID int) (int, error) { + ret := _m.Called(ctx, tagID) var r0 int - if rf, ok := ret.Get(0).(func(int) int); ok { - r0 = rf(tagID) + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, tagID) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(tagID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, tagID) } else { r1 = ret.Error(1) } @@ -33,13 +35,13 @@ func (_m *SceneMarkerReaderWriter) CountByTagID(tagID int) (int, error) { return r0, r1 } -// Create provides a mock function with given fields: newSceneMarker -func (_m *SceneMarkerReaderWriter) Create(newSceneMarker models.SceneMarker) (*models.SceneMarker, error) { - ret := _m.Called(newSceneMarker) +// Create provides a mock function with given fields: ctx, newSceneMarker +func (_m *SceneMarkerReaderWriter) Create(ctx context.Context, newSceneMarker models.SceneMarker) (*models.SceneMarker, error) { + ret := _m.Called(ctx, newSceneMarker) var r0 *models.SceneMarker - if rf, ok := ret.Get(0).(func(models.SceneMarker) *models.SceneMarker); ok { - r0 = rf(newSceneMarker) + if rf, ok := ret.Get(0).(func(context.Context, models.SceneMarker) *models.SceneMarker); ok { + r0 = rf(ctx, newSceneMarker) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.SceneMarker) @@ -47,8 +49,8 @@ func (_m *SceneMarkerReaderWriter) Create(newSceneMarker models.SceneMarker) 
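
One behavioural point worth keeping in mind when reviewing tests built on these mocks: the generated bodies never inspect ctx themselves, they only forward it, so cancellation semantics exist only where an expectation scripts them. A sketch (imports elided as in the earlier test sketch; the error value is chosen for illustration):

    ctx, cancel := context.WithCancel(context.Background())
    cancel()

    rw := &mocks.SceneMarkerReaderWriter{}
    // The Canceled error comes from the expectation, not from the mock
    // observing the cancelled ctx.
    rw.On("Find", mock.Anything, 7).Return(nil, context.Canceled)

    marker, err := rw.Find(ctx, 7)
    // marker is nil and err is context.Canceled, exactly as registered.
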
(*m } var r1 error - if rf, ok := ret.Get(1).(func(models.SceneMarker) error); ok { - r1 = rf(newSceneMarker) + if rf, ok := ret.Get(1).(func(context.Context, models.SceneMarker) error); ok { + r1 = rf(ctx, newSceneMarker) } else { r1 = ret.Error(1) } @@ -56,13 +58,13 @@ func (_m *SceneMarkerReaderWriter) Create(newSceneMarker models.SceneMarker) (*m return r0, r1 } -// Destroy provides a mock function with given fields: id -func (_m *SceneMarkerReaderWriter) Destroy(id int) error { - ret := _m.Called(id) +// Destroy provides a mock function with given fields: ctx, id +func (_m *SceneMarkerReaderWriter) Destroy(ctx context.Context, id int) error { + ret := _m.Called(ctx, id) var r0 error - if rf, ok := ret.Get(0).(func(int) error); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(context.Context, int) error); ok { + r0 = rf(ctx, id) } else { r0 = ret.Error(0) } @@ -70,13 +72,13 @@ func (_m *SceneMarkerReaderWriter) Destroy(id int) error { return r0 } -// Find provides a mock function with given fields: id -func (_m *SceneMarkerReaderWriter) Find(id int) (*models.SceneMarker, error) { - ret := _m.Called(id) +// Find provides a mock function with given fields: ctx, id +func (_m *SceneMarkerReaderWriter) Find(ctx context.Context, id int) (*models.SceneMarker, error) { + ret := _m.Called(ctx, id) var r0 *models.SceneMarker - if rf, ok := ret.Get(0).(func(int) *models.SceneMarker); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(context.Context, int) *models.SceneMarker); ok { + r0 = rf(ctx, id) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.SceneMarker) @@ -84,8 +86,8 @@ func (_m *SceneMarkerReaderWriter) Find(id int) (*models.SceneMarker, error) { } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(id) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) } else { r1 = ret.Error(1) } @@ -93,13 +95,13 @@ func (_m *SceneMarkerReaderWriter) Find(id int) (*models.SceneMarker, error) { return r0, r1 } -// FindBySceneID provides a mock function with given fields: sceneID -func (_m *SceneMarkerReaderWriter) FindBySceneID(sceneID int) ([]*models.SceneMarker, error) { - ret := _m.Called(sceneID) +// FindBySceneID provides a mock function with given fields: ctx, sceneID +func (_m *SceneMarkerReaderWriter) FindBySceneID(ctx context.Context, sceneID int) ([]*models.SceneMarker, error) { + ret := _m.Called(ctx, sceneID) var r0 []*models.SceneMarker - if rf, ok := ret.Get(0).(func(int) []*models.SceneMarker); ok { - r0 = rf(sceneID) + if rf, ok := ret.Get(0).(func(context.Context, int) []*models.SceneMarker); ok { + r0 = rf(ctx, sceneID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.SceneMarker) @@ -107,8 +109,8 @@ func (_m *SceneMarkerReaderWriter) FindBySceneID(sceneID int) ([]*models.SceneMa } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(sceneID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, sceneID) } else { r1 = ret.Error(1) } @@ -116,13 +118,13 @@ func (_m *SceneMarkerReaderWriter) FindBySceneID(sceneID int) ([]*models.SceneMa return r0, r1 } -// FindMany provides a mock function with given fields: ids -func (_m *SceneMarkerReaderWriter) FindMany(ids []int) ([]*models.SceneMarker, error) { - ret := _m.Called(ids) +// FindMany provides a mock function with given fields: ctx, ids +func (_m *SceneMarkerReaderWriter) FindMany(ctx context.Context, ids []int) ([]*models.SceneMarker, error) { + ret := _m.Called(ctx, ids) var r0 []*models.SceneMarker - if rf, ok 
:= ret.Get(0).(func([]int) []*models.SceneMarker); ok { - r0 = rf(ids) + if rf, ok := ret.Get(0).(func(context.Context, []int) []*models.SceneMarker); ok { + r0 = rf(ctx, ids) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.SceneMarker) @@ -130,8 +132,8 @@ func (_m *SceneMarkerReaderWriter) FindMany(ids []int) ([]*models.SceneMarker, e } var r1 error - if rf, ok := ret.Get(1).(func([]int) error); ok { - r1 = rf(ids) + if rf, ok := ret.Get(1).(func(context.Context, []int) error); ok { + r1 = rf(ctx, ids) } else { r1 = ret.Error(1) } @@ -139,13 +141,13 @@ func (_m *SceneMarkerReaderWriter) FindMany(ids []int) ([]*models.SceneMarker, e return r0, r1 } -// GetMarkerStrings provides a mock function with given fields: q, sort -func (_m *SceneMarkerReaderWriter) GetMarkerStrings(q *string, sort *string) ([]*models.MarkerStringsResultType, error) { - ret := _m.Called(q, sort) +// GetMarkerStrings provides a mock function with given fields: ctx, q, sort +func (_m *SceneMarkerReaderWriter) GetMarkerStrings(ctx context.Context, q *string, sort *string) ([]*models.MarkerStringsResultType, error) { + ret := _m.Called(ctx, q, sort) var r0 []*models.MarkerStringsResultType - if rf, ok := ret.Get(0).(func(*string, *string) []*models.MarkerStringsResultType); ok { - r0 = rf(q, sort) + if rf, ok := ret.Get(0).(func(context.Context, *string, *string) []*models.MarkerStringsResultType); ok { + r0 = rf(ctx, q, sort) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.MarkerStringsResultType) @@ -153,8 +155,8 @@ func (_m *SceneMarkerReaderWriter) GetMarkerStrings(q *string, sort *string) ([] } var r1 error - if rf, ok := ret.Get(1).(func(*string, *string) error); ok { - r1 = rf(q, sort) + if rf, ok := ret.Get(1).(func(context.Context, *string, *string) error); ok { + r1 = rf(ctx, q, sort) } else { r1 = ret.Error(1) } @@ -162,13 +164,13 @@ func (_m *SceneMarkerReaderWriter) GetMarkerStrings(q *string, sort *string) ([] return r0, r1 } -// GetTagIDs provides a mock function with given fields: imageID -func (_m *SceneMarkerReaderWriter) GetTagIDs(imageID int) ([]int, error) { - ret := _m.Called(imageID) +// GetTagIDs provides a mock function with given fields: ctx, imageID +func (_m *SceneMarkerReaderWriter) GetTagIDs(ctx context.Context, imageID int) ([]int, error) { + ret := _m.Called(ctx, imageID) var r0 []int - if rf, ok := ret.Get(0).(func(int) []int); ok { - r0 = rf(imageID) + if rf, ok := ret.Get(0).(func(context.Context, int) []int); ok { + r0 = rf(ctx, imageID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]int) @@ -176,8 +178,8 @@ func (_m *SceneMarkerReaderWriter) GetTagIDs(imageID int) ([]int, error) { } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(imageID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, imageID) } else { r1 = ret.Error(1) } @@ -185,13 +187,13 @@ func (_m *SceneMarkerReaderWriter) GetTagIDs(imageID int) ([]int, error) { return r0, r1 } -// Query provides a mock function with given fields: sceneMarkerFilter, findFilter -func (_m *SceneMarkerReaderWriter) Query(sceneMarkerFilter *models.SceneMarkerFilterType, findFilter *models.FindFilterType) ([]*models.SceneMarker, int, error) { - ret := _m.Called(sceneMarkerFilter, findFilter) +// Query provides a mock function with given fields: ctx, sceneMarkerFilter, findFilter +func (_m *SceneMarkerReaderWriter) Query(ctx context.Context, sceneMarkerFilter *models.SceneMarkerFilterType, findFilter *models.FindFilterType) ([]*models.SceneMarker, int, error) { + ret 
:= _m.Called(ctx, sceneMarkerFilter, findFilter) var r0 []*models.SceneMarker - if rf, ok := ret.Get(0).(func(*models.SceneMarkerFilterType, *models.FindFilterType) []*models.SceneMarker); ok { - r0 = rf(sceneMarkerFilter, findFilter) + if rf, ok := ret.Get(0).(func(context.Context, *models.SceneMarkerFilterType, *models.FindFilterType) []*models.SceneMarker); ok { + r0 = rf(ctx, sceneMarkerFilter, findFilter) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.SceneMarker) @@ -199,15 +201,15 @@ func (_m *SceneMarkerReaderWriter) Query(sceneMarkerFilter *models.SceneMarkerFi } var r1 int - if rf, ok := ret.Get(1).(func(*models.SceneMarkerFilterType, *models.FindFilterType) int); ok { - r1 = rf(sceneMarkerFilter, findFilter) + if rf, ok := ret.Get(1).(func(context.Context, *models.SceneMarkerFilterType, *models.FindFilterType) int); ok { + r1 = rf(ctx, sceneMarkerFilter, findFilter) } else { r1 = ret.Get(1).(int) } var r2 error - if rf, ok := ret.Get(2).(func(*models.SceneMarkerFilterType, *models.FindFilterType) error); ok { - r2 = rf(sceneMarkerFilter, findFilter) + if rf, ok := ret.Get(2).(func(context.Context, *models.SceneMarkerFilterType, *models.FindFilterType) error); ok { + r2 = rf(ctx, sceneMarkerFilter, findFilter) } else { r2 = ret.Error(2) } @@ -215,13 +217,13 @@ func (_m *SceneMarkerReaderWriter) Query(sceneMarkerFilter *models.SceneMarkerFi return r0, r1, r2 } -// Update provides a mock function with given fields: updatedSceneMarker -func (_m *SceneMarkerReaderWriter) Update(updatedSceneMarker models.SceneMarker) (*models.SceneMarker, error) { - ret := _m.Called(updatedSceneMarker) +// Update provides a mock function with given fields: ctx, updatedSceneMarker +func (_m *SceneMarkerReaderWriter) Update(ctx context.Context, updatedSceneMarker models.SceneMarker) (*models.SceneMarker, error) { + ret := _m.Called(ctx, updatedSceneMarker) var r0 *models.SceneMarker - if rf, ok := ret.Get(0).(func(models.SceneMarker) *models.SceneMarker); ok { - r0 = rf(updatedSceneMarker) + if rf, ok := ret.Get(0).(func(context.Context, models.SceneMarker) *models.SceneMarker); ok { + r0 = rf(ctx, updatedSceneMarker) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.SceneMarker) @@ -229,8 +231,8 @@ func (_m *SceneMarkerReaderWriter) Update(updatedSceneMarker models.SceneMarker) } var r1 error - if rf, ok := ret.Get(1).(func(models.SceneMarker) error); ok { - r1 = rf(updatedSceneMarker) + if rf, ok := ret.Get(1).(func(context.Context, models.SceneMarker) error); ok { + r1 = rf(ctx, updatedSceneMarker) } else { r1 = ret.Error(1) } @@ -238,13 +240,13 @@ func (_m *SceneMarkerReaderWriter) Update(updatedSceneMarker models.SceneMarker) return r0, r1 } -// UpdateTags provides a mock function with given fields: markerID, tagIDs -func (_m *SceneMarkerReaderWriter) UpdateTags(markerID int, tagIDs []int) error { - ret := _m.Called(markerID, tagIDs) +// UpdateTags provides a mock function with given fields: ctx, markerID, tagIDs +func (_m *SceneMarkerReaderWriter) UpdateTags(ctx context.Context, markerID int, tagIDs []int) error { + ret := _m.Called(ctx, markerID, tagIDs) var r0 error - if rf, ok := ret.Get(0).(func(int, []int) error); ok { - r0 = rf(markerID, tagIDs) + if rf, ok := ret.Get(0).(func(context.Context, int, []int) error); ok { + r0 = rf(ctx, markerID, tagIDs) } else { r0 = ret.Error(0) } @@ -252,13 +254,13 @@ func (_m *SceneMarkerReaderWriter) UpdateTags(markerID int, tagIDs []int) error return r0 } -// Wall provides a mock function with given fields: q -func (_m 
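
Query is the three-result case: r0 is the page of markers, r1 the total count, r2 the error. Note that the generated fallback for r1 is a bare ret.Get(1).(int), so the count must always be registered — as an int literal or a func — or the mock panics. A sketch with invented data (imports elided as in the earlier test sketch; assumes models.SceneMarker exposes an ID field):

    markers := []*models.SceneMarker{{ID: 1}, {ID: 2}}
    rw.On("Query", mock.Anything, mock.Anything, mock.Anything).
        Return(markers, len(markers), nil)
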
*SceneMarkerReaderWriter) Wall(q *string) ([]*models.SceneMarker, error) { - ret := _m.Called(q) +// Wall provides a mock function with given fields: ctx, q +func (_m *SceneMarkerReaderWriter) Wall(ctx context.Context, q *string) ([]*models.SceneMarker, error) { + ret := _m.Called(ctx, q) var r0 []*models.SceneMarker - if rf, ok := ret.Get(0).(func(*string) []*models.SceneMarker); ok { - r0 = rf(q) + if rf, ok := ret.Get(0).(func(context.Context, *string) []*models.SceneMarker); ok { + r0 = rf(ctx, q) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.SceneMarker) @@ -266,8 +268,8 @@ func (_m *SceneMarkerReaderWriter) Wall(q *string) ([]*models.SceneMarker, error } var r1 error - if rf, ok := ret.Get(1).(func(*string) error); ok { - r1 = rf(q) + if rf, ok := ret.Get(1).(func(context.Context, *string) error); ok { + r1 = rf(ctx, q) } else { r1 = ret.Error(1) } diff --git a/pkg/models/mocks/SceneReaderWriter.go b/pkg/models/mocks/SceneReaderWriter.go index 0635fd200..87b253686 100644 --- a/pkg/models/mocks/SceneReaderWriter.go +++ b/pkg/models/mocks/SceneReaderWriter.go @@ -3,8 +3,12 @@ package mocks import ( - models "github.com/stashapp/stash/pkg/models" + context "context" + + file "github.com/stashapp/stash/pkg/file" mock "github.com/stretchr/testify/mock" + + models "github.com/stashapp/stash/pkg/models" ) // SceneReaderWriter is an autogenerated mock type for the SceneReaderWriter type @@ -12,13 +16,13 @@ type SceneReaderWriter struct { mock.Mock } -// All provides a mock function with given fields: -func (_m *SceneReaderWriter) All() ([]*models.Scene, error) { - ret := _m.Called() +// All provides a mock function with given fields: ctx +func (_m *SceneReaderWriter) All(ctx context.Context) ([]*models.Scene, error) { + ret := _m.Called(ctx) var r0 []*models.Scene - if rf, ok := ret.Get(0).(func() []*models.Scene); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) []*models.Scene); ok { + r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Scene) @@ -26,8 +30,8 @@ func (_m *SceneReaderWriter) All() ([]*models.Scene, error) { } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -35,20 +39,20 @@ func (_m *SceneReaderWriter) All() ([]*models.Scene, error) { return r0, r1 } -// Count provides a mock function with given fields: -func (_m *SceneReaderWriter) Count() (int, error) { - ret := _m.Called() +// Count provides a mock function with given fields: ctx +func (_m *SceneReaderWriter) Count(ctx context.Context) (int, error) { + ret := _m.Called(ctx) var r0 int - if rf, ok := ret.Get(0).(func() int); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) int); ok { + r0 = rf(ctx) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -56,20 +60,20 @@ func (_m *SceneReaderWriter) Count() (int, error) { return r0, r1 } -// CountByMovieID provides a mock function with given fields: movieID -func (_m *SceneReaderWriter) CountByMovieID(movieID int) (int, error) { - ret := _m.Called(movieID) +// CountByMovieID provides a mock function with given fields: ctx, movieID +func (_m *SceneReaderWriter) CountByMovieID(ctx context.Context, movieID int) (int, error) { + ret := _m.Called(ctx, movieID) var r0 int - if rf, ok := ret.Get(0).(func(int) int); ok { - r0 = 
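
The SceneReaderWriter mock is the first in this set whose import block grows beyond context: it now also pulls in file "github.com/stashapp/stash/pkg/file", which suggests the scene interface gained file-typed methods somewhere past the end of this excerpt. The aliased, blank-line-separated import grouping visible in that hunk is mockery's own output format, so it should be left to the generator rather than reflowed by hand.
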
rf(movieID) + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, movieID) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(movieID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, movieID) } else { r1 = ret.Error(1) } @@ -77,20 +81,20 @@ func (_m *SceneReaderWriter) CountByMovieID(movieID int) (int, error) { return r0, r1 } -// CountByPerformerID provides a mock function with given fields: performerID -func (_m *SceneReaderWriter) CountByPerformerID(performerID int) (int, error) { - ret := _m.Called(performerID) +// CountByPerformerID provides a mock function with given fields: ctx, performerID +func (_m *SceneReaderWriter) CountByPerformerID(ctx context.Context, performerID int) (int, error) { + ret := _m.Called(ctx, performerID) var r0 int - if rf, ok := ret.Get(0).(func(int) int); ok { - r0 = rf(performerID) + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, performerID) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(performerID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, performerID) } else { r1 = ret.Error(1) } @@ -98,20 +102,20 @@ func (_m *SceneReaderWriter) CountByPerformerID(performerID int) (int, error) { return r0, r1 } -// CountByStudioID provides a mock function with given fields: studioID -func (_m *SceneReaderWriter) CountByStudioID(studioID int) (int, error) { - ret := _m.Called(studioID) +// CountByStudioID provides a mock function with given fields: ctx, studioID +func (_m *SceneReaderWriter) CountByStudioID(ctx context.Context, studioID int) (int, error) { + ret := _m.Called(ctx, studioID) var r0 int - if rf, ok := ret.Get(0).(func(int) int); ok { - r0 = rf(studioID) + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, studioID) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(studioID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, studioID) } else { r1 = ret.Error(1) } @@ -119,20 +123,20 @@ func (_m *SceneReaderWriter) CountByStudioID(studioID int) (int, error) { return r0, r1 } -// CountByTagID provides a mock function with given fields: tagID -func (_m *SceneReaderWriter) CountByTagID(tagID int) (int, error) { - ret := _m.Called(tagID) +// CountByTagID provides a mock function with given fields: ctx, tagID +func (_m *SceneReaderWriter) CountByTagID(ctx context.Context, tagID int) (int, error) { + ret := _m.Called(ctx, tagID) var r0 int - if rf, ok := ret.Get(0).(func(int) int); ok { - r0 = rf(tagID) + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, tagID) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(tagID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, tagID) } else { r1 = ret.Error(1) } @@ -140,20 +144,20 @@ func (_m *SceneReaderWriter) CountByTagID(tagID int) (int, error) { return r0, r1 } -// CountMissingChecksum provides a mock function with given fields: -func (_m *SceneReaderWriter) CountMissingChecksum() (int, error) { - ret := _m.Called() +// CountMissingChecksum provides a mock function with given fields: ctx +func (_m *SceneReaderWriter) CountMissingChecksum(ctx context.Context) (int, error) { + ret := _m.Called(ctx) var r0 int - if rf, ok := ret.Get(0).(func() int); ok { - r0 
= rf() + if rf, ok := ret.Get(0).(func(context.Context) int); ok { + r0 = rf(ctx) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -161,20 +165,20 @@ func (_m *SceneReaderWriter) CountMissingChecksum() (int, error) { return r0, r1 } -// CountMissingOSHash provides a mock function with given fields: -func (_m *SceneReaderWriter) CountMissingOSHash() (int, error) { - ret := _m.Called() +// CountMissingOSHash provides a mock function with given fields: ctx +func (_m *SceneReaderWriter) CountMissingOSHash(ctx context.Context) (int, error) { + ret := _m.Called(ctx) var r0 int - if rf, ok := ret.Get(0).(func() int); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) int); ok { + r0 = rf(ctx) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -182,57 +186,13 @@ func (_m *SceneReaderWriter) CountMissingOSHash() (int, error) { return r0, r1 } -// Create provides a mock function with given fields: newScene -func (_m *SceneReaderWriter) Create(newScene models.Scene) (*models.Scene, error) { - ret := _m.Called(newScene) - - var r0 *models.Scene - if rf, ok := ret.Get(0).(func(models.Scene) *models.Scene); ok { - r0 = rf(newScene) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*models.Scene) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(models.Scene) error); ok { - r1 = rf(newScene) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DecrementOCounter provides a mock function with given fields: id -func (_m *SceneReaderWriter) DecrementOCounter(id int) (int, error) { - ret := _m.Called(id) - - var r0 int - if rf, ok := ret.Get(0).(func(int) int); ok { - r0 = rf(id) - } else { - r0 = ret.Get(0).(int) - } - - var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(id) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Destroy provides a mock function with given fields: id -func (_m *SceneReaderWriter) Destroy(id int) error { - ret := _m.Called(id) +// Create provides a mock function with given fields: ctx, newScene, fileIDs +func (_m *SceneReaderWriter) Create(ctx context.Context, newScene *models.Scene, fileIDs []file.ID) error { + ret := _m.Called(ctx, newScene, fileIDs) var r0 error - if rf, ok := ret.Get(0).(func(int) error); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(context.Context, *models.Scene, []file.ID) error); ok { + r0 = rf(ctx, newScene, fileIDs) } else { r0 = ret.Error(0) } @@ -240,13 +200,34 @@ func (_m *SceneReaderWriter) Destroy(id int) error { return r0 } -// DestroyCover provides a mock function with given fields: sceneID -func (_m *SceneReaderWriter) DestroyCover(sceneID int) error { - ret := _m.Called(sceneID) +// DecrementOCounter provides a mock function with given fields: ctx, id +func (_m *SceneReaderWriter) DecrementOCounter(ctx context.Context, id int) (int, error) { + ret := _m.Called(ctx, id) + + var r0 int + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Destroy provides a mock function with given fields: ctx, id +func (_m 
*SceneReaderWriter) Destroy(ctx context.Context, id int) error { + ret := _m.Called(ctx, id) var r0 error - if rf, ok := ret.Get(0).(func(int) error); ok { - r0 = rf(sceneID) + if rf, ok := ret.Get(0).(func(context.Context, int) error); ok { + r0 = rf(ctx, id) } else { r0 = ret.Error(0) } @@ -254,20 +235,34 @@ func (_m *SceneReaderWriter) DestroyCover(sceneID int) error { return r0 } -// Duration provides a mock function with given fields: -func (_m *SceneReaderWriter) Duration() (float64, error) { - ret := _m.Called() +// DestroyCover provides a mock function with given fields: ctx, sceneID +func (_m *SceneReaderWriter) DestroyCover(ctx context.Context, sceneID int) error { + ret := _m.Called(ctx, sceneID) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int) error); ok { + r0 = rf(ctx, sceneID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Duration provides a mock function with given fields: ctx +func (_m *SceneReaderWriter) Duration(ctx context.Context) (float64, error) { + ret := _m.Called(ctx) var r0 float64 - if rf, ok := ret.Get(0).(func() float64); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) float64); ok { + r0 = rf(ctx) } else { r0 = ret.Get(0).(float64) } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -275,13 +270,13 @@ func (_m *SceneReaderWriter) Duration() (float64, error) { return r0, r1 } -// Find provides a mock function with given fields: id -func (_m *SceneReaderWriter) Find(id int) (*models.Scene, error) { - ret := _m.Called(id) +// Find provides a mock function with given fields: ctx, id +func (_m *SceneReaderWriter) Find(ctx context.Context, id int) (*models.Scene, error) { + ret := _m.Called(ctx, id) var r0 *models.Scene - if rf, ok := ret.Get(0).(func(int) *models.Scene); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(context.Context, int) *models.Scene); ok { + r0 = rf(ctx, id) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.Scene) @@ -289,8 +284,8 @@ func (_m *SceneReaderWriter) Find(id int) (*models.Scene, error) { } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(id) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) } else { r1 = ret.Error(1) } @@ -298,36 +293,13 @@ func (_m *SceneReaderWriter) Find(id int) (*models.Scene, error) { return r0, r1 } -// FindByChecksum provides a mock function with given fields: checksum -func (_m *SceneReaderWriter) FindByChecksum(checksum string) (*models.Scene, error) { - ret := _m.Called(checksum) - - var r0 *models.Scene - if rf, ok := ret.Get(0).(func(string) *models.Scene); ok { - r0 = rf(checksum) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*models.Scene) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(checksum) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FindByGalleryID provides a mock function with given fields: performerID -func (_m *SceneReaderWriter) FindByGalleryID(performerID int) ([]*models.Scene, error) { - ret := _m.Called(performerID) +// FindByChecksum provides a mock function with given fields: ctx, checksum +func (_m *SceneReaderWriter) FindByChecksum(ctx context.Context, checksum string) ([]*models.Scene, error) { + ret := _m.Called(ctx, checksum) var r0 []*models.Scene - if rf, ok := ret.Get(0).(func(int) []*models.Scene); ok { - r0 = rf(performerID) + if rf, ok := 
ret.Get(0).(func(context.Context, string) []*models.Scene); ok { + r0 = rf(ctx, checksum) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Scene) @@ -335,8 +307,8 @@ func (_m *SceneReaderWriter) FindByGalleryID(performerID int) ([]*models.Scene, } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(performerID) + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, checksum) } else { r1 = ret.Error(1) } @@ -344,13 +316,13 @@ func (_m *SceneReaderWriter) FindByGalleryID(performerID int) ([]*models.Scene, return r0, r1 } -// FindByMovieID provides a mock function with given fields: movieID -func (_m *SceneReaderWriter) FindByMovieID(movieID int) ([]*models.Scene, error) { - ret := _m.Called(movieID) +// FindByGalleryID provides a mock function with given fields: ctx, performerID +func (_m *SceneReaderWriter) FindByGalleryID(ctx context.Context, performerID int) ([]*models.Scene, error) { + ret := _m.Called(ctx, performerID) var r0 []*models.Scene - if rf, ok := ret.Get(0).(func(int) []*models.Scene); ok { - r0 = rf(movieID) + if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Scene); ok { + r0 = rf(ctx, performerID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Scene) @@ -358,8 +330,8 @@ func (_m *SceneReaderWriter) FindByMovieID(movieID int) ([]*models.Scene, error) } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(movieID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, performerID) } else { r1 = ret.Error(1) } @@ -367,59 +339,13 @@ func (_m *SceneReaderWriter) FindByMovieID(movieID int) ([]*models.Scene, error) return r0, r1 } -// FindByOSHash provides a mock function with given fields: oshash -func (_m *SceneReaderWriter) FindByOSHash(oshash string) (*models.Scene, error) { - ret := _m.Called(oshash) - - var r0 *models.Scene - if rf, ok := ret.Get(0).(func(string) *models.Scene); ok { - r0 = rf(oshash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*models.Scene) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(oshash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FindByPath provides a mock function with given fields: path -func (_m *SceneReaderWriter) FindByPath(path string) (*models.Scene, error) { - ret := _m.Called(path) - - var r0 *models.Scene - if rf, ok := ret.Get(0).(func(string) *models.Scene); ok { - r0 = rf(path) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*models.Scene) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(path) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FindByPerformerID provides a mock function with given fields: performerID -func (_m *SceneReaderWriter) FindByPerformerID(performerID int) ([]*models.Scene, error) { - ret := _m.Called(performerID) +// FindByMovieID provides a mock function with given fields: ctx, movieID +func (_m *SceneReaderWriter) FindByMovieID(ctx context.Context, movieID int) ([]*models.Scene, error) { + ret := _m.Called(ctx, movieID) var r0 []*models.Scene - if rf, ok := ret.Get(0).(func(int) []*models.Scene); ok { - r0 = rf(performerID) + if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Scene); ok { + r0 = rf(ctx, movieID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Scene) @@ -427,8 +353,8 @@ func (_m *SceneReaderWriter) FindByPerformerID(performerID int) ([]*models.Scene } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 
= rf(performerID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, movieID) } else { r1 = ret.Error(1) } @@ -436,13 +362,82 @@ func (_m *SceneReaderWriter) FindByPerformerID(performerID int) ([]*models.Scene return r0, r1 } -// FindDuplicates provides a mock function with given fields: distance -func (_m *SceneReaderWriter) FindDuplicates(distance int) ([][]*models.Scene, error) { - ret := _m.Called(distance) +// FindByOSHash provides a mock function with given fields: ctx, oshash +func (_m *SceneReaderWriter) FindByOSHash(ctx context.Context, oshash string) ([]*models.Scene, error) { + ret := _m.Called(ctx, oshash) + + var r0 []*models.Scene + if rf, ok := ret.Get(0).(func(context.Context, string) []*models.Scene); ok { + r0 = rf(ctx, oshash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Scene) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, oshash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindByPath provides a mock function with given fields: ctx, path +func (_m *SceneReaderWriter) FindByPath(ctx context.Context, path string) ([]*models.Scene, error) { + ret := _m.Called(ctx, path) + + var r0 []*models.Scene + if rf, ok := ret.Get(0).(func(context.Context, string) []*models.Scene); ok { + r0 = rf(ctx, path) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Scene) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, path) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindByPerformerID provides a mock function with given fields: ctx, performerID +func (_m *SceneReaderWriter) FindByPerformerID(ctx context.Context, performerID int) ([]*models.Scene, error) { + ret := _m.Called(ctx, performerID) + + var r0 []*models.Scene + if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Scene); ok { + r0 = rf(ctx, performerID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Scene) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, performerID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindDuplicates provides a mock function with given fields: ctx, distance +func (_m *SceneReaderWriter) FindDuplicates(ctx context.Context, distance int) ([][]*models.Scene, error) { + ret := _m.Called(ctx, distance) var r0 [][]*models.Scene - if rf, ok := ret.Get(0).(func(int) [][]*models.Scene); ok { - r0 = rf(distance) + if rf, ok := ret.Get(0).(func(context.Context, int) [][]*models.Scene); ok { + r0 = rf(ctx, distance) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([][]*models.Scene) @@ -450,8 +445,8 @@ func (_m *SceneReaderWriter) FindDuplicates(distance int) ([][]*models.Scene, er } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(distance) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, distance) } else { r1 = ret.Error(1) } @@ -459,13 +454,13 @@ func (_m *SceneReaderWriter) FindDuplicates(distance int) ([][]*models.Scene, er return r0, r1 } -// FindMany provides a mock function with given fields: ids -func (_m *SceneReaderWriter) FindMany(ids []int) ([]*models.Scene, error) { - ret := _m.Called(ids) +// FindMany provides a mock function with given fields: ctx, ids +func (_m *SceneReaderWriter) FindMany(ctx context.Context, ids []int) ([]*models.Scene, error) { + ret := _m.Called(ctx, ids) var r0 []*models.Scene - if rf, ok := 
ret.Get(0).(func([]int) []*models.Scene); ok { - r0 = rf(ids) + if rf, ok := ret.Get(0).(func(context.Context, []int) []*models.Scene); ok { + r0 = rf(ctx, ids) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Scene) @@ -473,8 +468,8 @@ func (_m *SceneReaderWriter) FindMany(ids []int) ([]*models.Scene, error) { } var r1 error - if rf, ok := ret.Get(1).(func([]int) error); ok { - r1 = rf(ids) + if rf, ok := ret.Get(1).(func(context.Context, []int) error); ok { + r1 = rf(ctx, ids) } else { r1 = ret.Error(1) } @@ -482,36 +477,13 @@ func (_m *SceneReaderWriter) FindMany(ids []int) ([]*models.Scene, error) { return r0, r1 } -// GetCaptions provides a mock function with given fields: sceneID -func (_m *SceneReaderWriter) GetCaptions(sceneID int) ([]*models.SceneCaption, error) { - ret := _m.Called(sceneID) - - var r0 []*models.SceneCaption - if rf, ok := ret.Get(0).(func(int) []*models.SceneCaption); ok { - r0 = rf(sceneID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*models.SceneCaption) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(sceneID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetCover provides a mock function with given fields: sceneID -func (_m *SceneReaderWriter) GetCover(sceneID int) ([]byte, error) { - ret := _m.Called(sceneID) +// GetCover provides a mock function with given fields: ctx, sceneID +func (_m *SceneReaderWriter) GetCover(ctx context.Context, sceneID int) ([]byte, error) { + ret := _m.Called(ctx, sceneID) var r0 []byte - if rf, ok := ret.Get(0).(func(int) []byte); ok { - r0 = rf(sceneID) + if rf, ok := ret.Get(0).(func(context.Context, int) []byte); ok { + r0 = rf(ctx, sceneID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]byte) @@ -519,8 +491,8 @@ func (_m *SceneReaderWriter) GetCover(sceneID int) ([]byte, error) { } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(sceneID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, sceneID) } else { r1 = ret.Error(1) } @@ -528,13 +500,36 @@ func (_m *SceneReaderWriter) GetCover(sceneID int) ([]byte, error) { return r0, r1 } -// GetGalleryIDs provides a mock function with given fields: sceneID -func (_m *SceneReaderWriter) GetGalleryIDs(sceneID int) ([]int, error) { - ret := _m.Called(sceneID) +// GetFiles provides a mock function with given fields: ctx, relatedID +func (_m *SceneReaderWriter) GetFiles(ctx context.Context, relatedID int) ([]*file.VideoFile, error) { + ret := _m.Called(ctx, relatedID) + + var r0 []*file.VideoFile + if rf, ok := ret.Get(0).(func(context.Context, int) []*file.VideoFile); ok { + r0 = rf(ctx, relatedID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*file.VideoFile) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, relatedID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetGalleryIDs provides a mock function with given fields: ctx, relatedID +func (_m *SceneReaderWriter) GetGalleryIDs(ctx context.Context, relatedID int) ([]int, error) { + ret := _m.Called(ctx, relatedID) var r0 []int - if rf, ok := ret.Get(0).(func(int) []int); ok { - r0 = rf(sceneID) + if rf, ok := ret.Get(0).(func(context.Context, int) []int); ok { + r0 = rf(ctx, relatedID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]int) @@ -542,8 +537,8 @@ func (_m *SceneReaderWriter) GetGalleryIDs(sceneID int) ([]int, error) { } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(sceneID) + if 
rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, relatedID) } else { r1 = ret.Error(1) } @@ -551,13 +546,13 @@ func (_m *SceneReaderWriter) GetGalleryIDs(sceneID int) ([]int, error) { return r0, r1 } -// GetMovies provides a mock function with given fields: sceneID -func (_m *SceneReaderWriter) GetMovies(sceneID int) ([]models.MoviesScenes, error) { - ret := _m.Called(sceneID) +// GetMovies provides a mock function with given fields: ctx, id +func (_m *SceneReaderWriter) GetMovies(ctx context.Context, id int) ([]models.MoviesScenes, error) { + ret := _m.Called(ctx, id) var r0 []models.MoviesScenes - if rf, ok := ret.Get(0).(func(int) []models.MoviesScenes); ok { - r0 = rf(sceneID) + if rf, ok := ret.Get(0).(func(context.Context, int) []models.MoviesScenes); ok { + r0 = rf(ctx, id) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]models.MoviesScenes) @@ -565,8 +560,8 @@ func (_m *SceneReaderWriter) GetMovies(sceneID int) ([]models.MoviesScenes, erro } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(sceneID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) } else { r1 = ret.Error(1) } @@ -574,13 +569,13 @@ func (_m *SceneReaderWriter) GetMovies(sceneID int) ([]models.MoviesScenes, erro return r0, r1 } -// GetPerformerIDs provides a mock function with given fields: sceneID -func (_m *SceneReaderWriter) GetPerformerIDs(sceneID int) ([]int, error) { - ret := _m.Called(sceneID) +// GetPerformerIDs provides a mock function with given fields: ctx, relatedID +func (_m *SceneReaderWriter) GetPerformerIDs(ctx context.Context, relatedID int) ([]int, error) { + ret := _m.Called(ctx, relatedID) var r0 []int - if rf, ok := ret.Get(0).(func(int) []int); ok { - r0 = rf(sceneID) + if rf, ok := ret.Get(0).(func(context.Context, int) []int); ok { + r0 = rf(ctx, relatedID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]int) @@ -588,8 +583,8 @@ func (_m *SceneReaderWriter) GetPerformerIDs(sceneID int) ([]int, error) { } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(sceneID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, relatedID) } else { r1 = ret.Error(1) } @@ -597,22 +592,22 @@ func (_m *SceneReaderWriter) GetPerformerIDs(sceneID int) ([]int, error) { return r0, r1 } -// GetStashIDs provides a mock function with given fields: sceneID -func (_m *SceneReaderWriter) GetStashIDs(sceneID int) ([]*models.StashID, error) { - ret := _m.Called(sceneID) +// GetStashIDs provides a mock function with given fields: ctx, relatedID +func (_m *SceneReaderWriter) GetStashIDs(ctx context.Context, relatedID int) ([]models.StashID, error) { + ret := _m.Called(ctx, relatedID) - var r0 []*models.StashID - if rf, ok := ret.Get(0).(func(int) []*models.StashID); ok { - r0 = rf(sceneID) + var r0 []models.StashID + if rf, ok := ret.Get(0).(func(context.Context, int) []models.StashID); ok { + r0 = rf(ctx, relatedID) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*models.StashID) + r0 = ret.Get(0).([]models.StashID) } } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(sceneID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, relatedID) } else { r1 = ret.Error(1) } @@ -620,13 +615,13 @@ func (_m *SceneReaderWriter) GetStashIDs(sceneID int) ([]*models.StashID, error) return r0, r1 } -// GetTagIDs provides a mock function with given fields: sceneID -func (_m *SceneReaderWriter) GetTagIDs(sceneID int) ([]int, error) { - ret 
:= _m.Called(sceneID) +// GetTagIDs provides a mock function with given fields: ctx, relatedID +func (_m *SceneReaderWriter) GetTagIDs(ctx context.Context, relatedID int) ([]int, error) { + ret := _m.Called(ctx, relatedID) var r0 []int - if rf, ok := ret.Get(0).(func(int) []int); ok { - r0 = rf(sceneID) + if rf, ok := ret.Get(0).(func(context.Context, int) []int); ok { + r0 = rf(ctx, relatedID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]int) @@ -634,8 +629,8 @@ func (_m *SceneReaderWriter) GetTagIDs(sceneID int) ([]int, error) { } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(sceneID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, relatedID) } else { r1 = ret.Error(1) } @@ -643,20 +638,20 @@ func (_m *SceneReaderWriter) GetTagIDs(sceneID int) ([]int, error) { return r0, r1 } -// IncrementOCounter provides a mock function with given fields: id -func (_m *SceneReaderWriter) IncrementOCounter(id int) (int, error) { - ret := _m.Called(id) +// IncrementOCounter provides a mock function with given fields: ctx, id +func (_m *SceneReaderWriter) IncrementOCounter(ctx context.Context, id int) (int, error) { + ret := _m.Called(ctx, id) var r0 int - if rf, ok := ret.Get(0).(func(int) int); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, id) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(id) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) } else { r1 = ret.Error(1) } @@ -664,13 +659,13 @@ func (_m *SceneReaderWriter) IncrementOCounter(id int) (int, error) { return r0, r1 } -// Query provides a mock function with given fields: options -func (_m *SceneReaderWriter) Query(options models.SceneQueryOptions) (*models.SceneQueryResult, error) { - ret := _m.Called(options) +// Query provides a mock function with given fields: ctx, options +func (_m *SceneReaderWriter) Query(ctx context.Context, options models.SceneQueryOptions) (*models.SceneQueryResult, error) { + ret := _m.Called(ctx, options) var r0 *models.SceneQueryResult - if rf, ok := ret.Get(0).(func(models.SceneQueryOptions) *models.SceneQueryResult); ok { - r0 = rf(options) + if rf, ok := ret.Get(0).(func(context.Context, models.SceneQueryOptions) *models.SceneQueryResult); ok { + r0 = rf(ctx, options) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.SceneQueryResult) @@ -678,8 +673,8 @@ func (_m *SceneReaderWriter) Query(options models.SceneQueryOptions) (*models.Sc } var r1 error - if rf, ok := ret.Get(1).(func(models.SceneQueryOptions) error); ok { - r1 = rf(options) + if rf, ok := ret.Get(1).(func(context.Context, models.SceneQueryOptions) error); ok { + r1 = rf(ctx, options) } else { r1 = ret.Error(1) } @@ -687,20 +682,20 @@ func (_m *SceneReaderWriter) Query(options models.SceneQueryOptions) (*models.Sc return r0, r1 } -// ResetOCounter provides a mock function with given fields: id -func (_m *SceneReaderWriter) ResetOCounter(id int) (int, error) { - ret := _m.Called(id) +// ResetOCounter provides a mock function with given fields: ctx, id +func (_m *SceneReaderWriter) ResetOCounter(ctx context.Context, id int) (int, error) { + ret := _m.Called(ctx, id) var r0 int - if rf, ok := ret.Get(0).(func(int) int); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, id) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(id) + if 
rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) } else { r1 = ret.Error(1) } @@ -708,20 +703,20 @@ func (_m *SceneReaderWriter) ResetOCounter(id int) (int, error) { return r0, r1 } -// Size provides a mock function with given fields: -func (_m *SceneReaderWriter) Size() (float64, error) { - ret := _m.Called() +// Size provides a mock function with given fields: ctx +func (_m *SceneReaderWriter) Size(ctx context.Context) (float64, error) { + ret := _m.Called(ctx) var r0 float64 - if rf, ok := ret.Get(0).(func() float64); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) float64); ok { + r0 = rf(ctx) } else { r0 = ret.Get(0).(float64) } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -729,13 +724,41 @@ func (_m *SceneReaderWriter) Size() (float64, error) { return r0, r1 } -// Update provides a mock function with given fields: updatedScene -func (_m *SceneReaderWriter) Update(updatedScene models.ScenePartial) (*models.Scene, error) { - ret := _m.Called(updatedScene) +// Update provides a mock function with given fields: ctx, updatedScene +func (_m *SceneReaderWriter) Update(ctx context.Context, updatedScene *models.Scene) error { + ret := _m.Called(ctx, updatedScene) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *models.Scene) error); ok { + r0 = rf(ctx, updatedScene) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateCover provides a mock function with given fields: ctx, sceneID, cover +func (_m *SceneReaderWriter) UpdateCover(ctx context.Context, sceneID int, cover []byte) error { + ret := _m.Called(ctx, sceneID, cover) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int, []byte) error); ok { + r0 = rf(ctx, sceneID, cover) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdatePartial provides a mock function with given fields: ctx, id, updatedScene +func (_m *SceneReaderWriter) UpdatePartial(ctx context.Context, id int, updatedScene models.ScenePartial) (*models.Scene, error) { + ret := _m.Called(ctx, id, updatedScene) var r0 *models.Scene - if rf, ok := ret.Get(0).(func(models.ScenePartial) *models.Scene); ok { - r0 = rf(updatedScene) + if rf, ok := ret.Get(0).(func(context.Context, int, models.ScenePartial) *models.Scene); ok { + r0 = rf(ctx, id, updatedScene) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.Scene) @@ -743,8 +766,8 @@ func (_m *SceneReaderWriter) Update(updatedScene models.ScenePartial) (*models.S } var r1 error - if rf, ok := ret.Get(1).(func(models.ScenePartial) error); ok { - r1 = rf(updatedScene) + if rf, ok := ret.Get(1).(func(context.Context, int, models.ScenePartial) error); ok { + r1 = rf(ctx, id, updatedScene) } else { r1 = ret.Error(1) } @@ -752,148 +775,13 @@ func (_m *SceneReaderWriter) Update(updatedScene models.ScenePartial) (*models.S return r0, r1 } -// UpdateCaptions provides a mock function with given fields: id, captions -func (_m *SceneReaderWriter) UpdateCaptions(id int, captions []*models.SceneCaption) error { - ret := _m.Called(id, captions) - - var r0 error - if rf, ok := ret.Get(0).(func(int, []*models.SceneCaption) error); ok { - r0 = rf(id, captions) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UpdateCover provides a mock function with given fields: sceneID, cover -func (_m *SceneReaderWriter) UpdateCover(sceneID int, cover []byte) error { - ret := _m.Called(sceneID, cover) - - var r0 
error - if rf, ok := ret.Get(0).(func(int, []byte) error); ok { - r0 = rf(sceneID, cover) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UpdateFileModTime provides a mock function with given fields: id, modTime -func (_m *SceneReaderWriter) UpdateFileModTime(id int, modTime models.NullSQLiteTimestamp) error { - ret := _m.Called(id, modTime) - - var r0 error - if rf, ok := ret.Get(0).(func(int, models.NullSQLiteTimestamp) error); ok { - r0 = rf(id, modTime) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UpdateFull provides a mock function with given fields: updatedScene -func (_m *SceneReaderWriter) UpdateFull(updatedScene models.Scene) (*models.Scene, error) { - ret := _m.Called(updatedScene) - - var r0 *models.Scene - if rf, ok := ret.Get(0).(func(models.Scene) *models.Scene); ok { - r0 = rf(updatedScene) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*models.Scene) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(models.Scene) error); ok { - r1 = rf(updatedScene) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// UpdateGalleries provides a mock function with given fields: sceneID, galleryIDs -func (_m *SceneReaderWriter) UpdateGalleries(sceneID int, galleryIDs []int) error { - ret := _m.Called(sceneID, galleryIDs) - - var r0 error - if rf, ok := ret.Get(0).(func(int, []int) error); ok { - r0 = rf(sceneID, galleryIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UpdateMovies provides a mock function with given fields: sceneID, movies -func (_m *SceneReaderWriter) UpdateMovies(sceneID int, movies []models.MoviesScenes) error { - ret := _m.Called(sceneID, movies) - - var r0 error - if rf, ok := ret.Get(0).(func(int, []models.MoviesScenes) error); ok { - r0 = rf(sceneID, movies) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UpdatePerformers provides a mock function with given fields: sceneID, performerIDs -func (_m *SceneReaderWriter) UpdatePerformers(sceneID int, performerIDs []int) error { - ret := _m.Called(sceneID, performerIDs) - - var r0 error - if rf, ok := ret.Get(0).(func(int, []int) error); ok { - r0 = rf(sceneID, performerIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UpdateStashIDs provides a mock function with given fields: sceneID, stashIDs -func (_m *SceneReaderWriter) UpdateStashIDs(sceneID int, stashIDs []models.StashID) error { - ret := _m.Called(sceneID, stashIDs) - - var r0 error - if rf, ok := ret.Get(0).(func(int, []models.StashID) error); ok { - r0 = rf(sceneID, stashIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UpdateTags provides a mock function with given fields: sceneID, tagIDs -func (_m *SceneReaderWriter) UpdateTags(sceneID int, tagIDs []int) error { - ret := _m.Called(sceneID, tagIDs) - - var r0 error - if rf, ok := ret.Get(0).(func(int, []int) error); ok { - r0 = rf(sceneID, tagIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Wall provides a mock function with given fields: q -func (_m *SceneReaderWriter) Wall(q *string) ([]*models.Scene, error) { - ret := _m.Called(q) +// Wall provides a mock function with given fields: ctx, q +func (_m *SceneReaderWriter) Wall(ctx context.Context, q *string) ([]*models.Scene, error) { + ret := _m.Called(ctx, q) var r0 []*models.Scene - if rf, ok := ret.Get(0).(func(*string) []*models.Scene); ok { - r0 = rf(q) + if rf, ok := ret.Get(0).(func(context.Context, *string) []*models.Scene); ok { + r0 = rf(ctx, q) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Scene) @@ -901,8 +789,8 @@ func (_m 
*SceneReaderWriter) Wall(q *string) ([]*models.Scene, error) { } var r1 error - if rf, ok := ret.Get(1).(func(*string) error); ok { - r1 = rf(q) + if rf, ok := ret.Get(1).(func(context.Context, *string) error); ok { + r1 = rf(ctx, q) } else { r1 = ret.Error(1) } diff --git a/pkg/models/mocks/ScrapedItemReaderWriter.go b/pkg/models/mocks/ScrapedItemReaderWriter.go index e06b7451d..7157ab855 100644 --- a/pkg/models/mocks/ScrapedItemReaderWriter.go +++ b/pkg/models/mocks/ScrapedItemReaderWriter.go @@ -3,6 +3,8 @@ package mocks import ( + context "context" + models "github.com/stashapp/stash/pkg/models" mock "github.com/stretchr/testify/mock" ) @@ -12,13 +14,13 @@ type ScrapedItemReaderWriter struct { mock.Mock } -// All provides a mock function with given fields: -func (_m *ScrapedItemReaderWriter) All() ([]*models.ScrapedItem, error) { - ret := _m.Called() +// All provides a mock function with given fields: ctx +func (_m *ScrapedItemReaderWriter) All(ctx context.Context) ([]*models.ScrapedItem, error) { + ret := _m.Called(ctx) var r0 []*models.ScrapedItem - if rf, ok := ret.Get(0).(func() []*models.ScrapedItem); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) []*models.ScrapedItem); ok { + r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.ScrapedItem) @@ -26,8 +28,8 @@ func (_m *ScrapedItemReaderWriter) All() ([]*models.ScrapedItem, error) { } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -35,13 +37,13 @@ func (_m *ScrapedItemReaderWriter) All() ([]*models.ScrapedItem, error) { return r0, r1 } -// Create provides a mock function with given fields: newObject -func (_m *ScrapedItemReaderWriter) Create(newObject models.ScrapedItem) (*models.ScrapedItem, error) { - ret := _m.Called(newObject) +// Create provides a mock function with given fields: ctx, newObject +func (_m *ScrapedItemReaderWriter) Create(ctx context.Context, newObject models.ScrapedItem) (*models.ScrapedItem, error) { + ret := _m.Called(ctx, newObject) var r0 *models.ScrapedItem - if rf, ok := ret.Get(0).(func(models.ScrapedItem) *models.ScrapedItem); ok { - r0 = rf(newObject) + if rf, ok := ret.Get(0).(func(context.Context, models.ScrapedItem) *models.ScrapedItem); ok { + r0 = rf(ctx, newObject) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.ScrapedItem) @@ -49,8 +51,8 @@ func (_m *ScrapedItemReaderWriter) Create(newObject models.ScrapedItem) (*models } var r1 error - if rf, ok := ret.Get(1).(func(models.ScrapedItem) error); ok { - r1 = rf(newObject) + if rf, ok := ret.Get(1).(func(context.Context, models.ScrapedItem) error); ok { + r1 = rf(ctx, newObject) } else { r1 = ret.Error(1) } diff --git a/pkg/models/mocks/StudioReaderWriter.go b/pkg/models/mocks/StudioReaderWriter.go index c15c73719..043bfdecc 100644 --- a/pkg/models/mocks/StudioReaderWriter.go +++ b/pkg/models/mocks/StudioReaderWriter.go @@ -3,6 +3,8 @@ package mocks import ( + context "context" + models "github.com/stashapp/stash/pkg/models" mock "github.com/stretchr/testify/mock" ) @@ -12,13 +14,13 @@ type StudioReaderWriter struct { mock.Mock } -// All provides a mock function with given fields: -func (_m *StudioReaderWriter) All() ([]*models.Studio, error) { - ret := _m.Called() +// All provides a mock function with given fields: ctx +func (_m *StudioReaderWriter) All(ctx context.Context) ([]*models.Studio, error) { + ret := _m.Called(ctx) var r0 []*models.Studio - if rf, ok := 
ret.Get(0).(func() []*models.Studio); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) []*models.Studio); ok { + r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Studio) @@ -26,8 +28,8 @@ func (_m *StudioReaderWriter) All() ([]*models.Studio, error) { } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -35,20 +37,20 @@ func (_m *StudioReaderWriter) All() ([]*models.Studio, error) { return r0, r1 } -// Count provides a mock function with given fields: -func (_m *StudioReaderWriter) Count() (int, error) { - ret := _m.Called() +// Count provides a mock function with given fields: ctx +func (_m *StudioReaderWriter) Count(ctx context.Context) (int, error) { + ret := _m.Called(ctx) var r0 int - if rf, ok := ret.Get(0).(func() int); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) int); ok { + r0 = rf(ctx) } else { r0 = ret.Get(0).(int) } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -56,13 +58,13 @@ func (_m *StudioReaderWriter) Count() (int, error) { return r0, r1 } -// Create provides a mock function with given fields: newStudio -func (_m *StudioReaderWriter) Create(newStudio models.Studio) (*models.Studio, error) { - ret := _m.Called(newStudio) +// Create provides a mock function with given fields: ctx, newStudio +func (_m *StudioReaderWriter) Create(ctx context.Context, newStudio models.Studio) (*models.Studio, error) { + ret := _m.Called(ctx, newStudio) var r0 *models.Studio - if rf, ok := ret.Get(0).(func(models.Studio) *models.Studio); ok { - r0 = rf(newStudio) + if rf, ok := ret.Get(0).(func(context.Context, models.Studio) *models.Studio); ok { + r0 = rf(ctx, newStudio) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.Studio) @@ -70,8 +72,8 @@ func (_m *StudioReaderWriter) Create(newStudio models.Studio) (*models.Studio, e } var r1 error - if rf, ok := ret.Get(1).(func(models.Studio) error); ok { - r1 = rf(newStudio) + if rf, ok := ret.Get(1).(func(context.Context, models.Studio) error); ok { + r1 = rf(ctx, newStudio) } else { r1 = ret.Error(1) } @@ -79,13 +81,13 @@ func (_m *StudioReaderWriter) Create(newStudio models.Studio) (*models.Studio, e return r0, r1 } -// Destroy provides a mock function with given fields: id -func (_m *StudioReaderWriter) Destroy(id int) error { - ret := _m.Called(id) +// Destroy provides a mock function with given fields: ctx, id +func (_m *StudioReaderWriter) Destroy(ctx context.Context, id int) error { + ret := _m.Called(ctx, id) var r0 error - if rf, ok := ret.Get(0).(func(int) error); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(context.Context, int) error); ok { + r0 = rf(ctx, id) } else { r0 = ret.Error(0) } @@ -93,13 +95,13 @@ func (_m *StudioReaderWriter) Destroy(id int) error { return r0 } -// DestroyImage provides a mock function with given fields: studioID -func (_m *StudioReaderWriter) DestroyImage(studioID int) error { - ret := _m.Called(studioID) +// DestroyImage provides a mock function with given fields: ctx, studioID +func (_m *StudioReaderWriter) DestroyImage(ctx context.Context, studioID int) error { + ret := _m.Called(ctx, studioID) var r0 error - if rf, ok := ret.Get(0).(func(int) error); ok { - r0 = rf(studioID) + if rf, ok := ret.Get(0).(func(context.Context, int) error); ok { + r0 = rf(ctx, studioID) } 
else { r0 = ret.Error(0) } @@ -107,13 +109,13 @@ func (_m *StudioReaderWriter) DestroyImage(studioID int) error { return r0 } -// Find provides a mock function with given fields: id -func (_m *StudioReaderWriter) Find(id int) (*models.Studio, error) { - ret := _m.Called(id) +// Find provides a mock function with given fields: ctx, id +func (_m *StudioReaderWriter) Find(ctx context.Context, id int) (*models.Studio, error) { + ret := _m.Called(ctx, id) var r0 *models.Studio - if rf, ok := ret.Get(0).(func(int) *models.Studio); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(context.Context, int) *models.Studio); ok { + r0 = rf(ctx, id) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.Studio) @@ -121,8 +123,8 @@ func (_m *StudioReaderWriter) Find(id int) (*models.Studio, error) { } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(id) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) } else { r1 = ret.Error(1) } @@ -130,13 +132,13 @@ func (_m *StudioReaderWriter) Find(id int) (*models.Studio, error) { return r0, r1 } -// FindByName provides a mock function with given fields: name, nocase -func (_m *StudioReaderWriter) FindByName(name string, nocase bool) (*models.Studio, error) { - ret := _m.Called(name, nocase) +// FindByName provides a mock function with given fields: ctx, name, nocase +func (_m *StudioReaderWriter) FindByName(ctx context.Context, name string, nocase bool) (*models.Studio, error) { + ret := _m.Called(ctx, name, nocase) var r0 *models.Studio - if rf, ok := ret.Get(0).(func(string, bool) *models.Studio); ok { - r0 = rf(name, nocase) + if rf, ok := ret.Get(0).(func(context.Context, string, bool) *models.Studio); ok { + r0 = rf(ctx, name, nocase) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.Studio) @@ -144,8 +146,8 @@ func (_m *StudioReaderWriter) FindByName(name string, nocase bool) (*models.Stud } var r1 error - if rf, ok := ret.Get(1).(func(string, bool) error); ok { - r1 = rf(name, nocase) + if rf, ok := ret.Get(1).(func(context.Context, string, bool) error); ok { + r1 = rf(ctx, name, nocase) } else { r1 = ret.Error(1) } @@ -153,13 +155,13 @@ func (_m *StudioReaderWriter) FindByName(name string, nocase bool) (*models.Stud return r0, r1 } -// FindByStashID provides a mock function with given fields: stashID -func (_m *StudioReaderWriter) FindByStashID(stashID models.StashID) ([]*models.Studio, error) { - ret := _m.Called(stashID) +// FindByStashID provides a mock function with given fields: ctx, stashID +func (_m *StudioReaderWriter) FindByStashID(ctx context.Context, stashID models.StashID) ([]*models.Studio, error) { + ret := _m.Called(ctx, stashID) var r0 []*models.Studio - if rf, ok := ret.Get(0).(func(models.StashID) []*models.Studio); ok { - r0 = rf(stashID) + if rf, ok := ret.Get(0).(func(context.Context, models.StashID) []*models.Studio); ok { + r0 = rf(ctx, stashID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Studio) @@ -167,8 +169,8 @@ func (_m *StudioReaderWriter) FindByStashID(stashID models.StashID) ([]*models.S } var r1 error - if rf, ok := ret.Get(1).(func(models.StashID) error); ok { - r1 = rf(stashID) + if rf, ok := ret.Get(1).(func(context.Context, models.StashID) error); ok { + r1 = rf(ctx, stashID) } else { r1 = ret.Error(1) } @@ -176,13 +178,13 @@ func (_m *StudioReaderWriter) FindByStashID(stashID models.StashID) ([]*models.S return r0, r1 } -// FindChildren provides a mock function with given fields: id -func (_m *StudioReaderWriter) FindChildren(id int) 
([]*models.Studio, error) { - ret := _m.Called(id) +// FindChildren provides a mock function with given fields: ctx, id +func (_m *StudioReaderWriter) FindChildren(ctx context.Context, id int) ([]*models.Studio, error) { + ret := _m.Called(ctx, id) var r0 []*models.Studio - if rf, ok := ret.Get(0).(func(int) []*models.Studio); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Studio); ok { + r0 = rf(ctx, id) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Studio) @@ -190,8 +192,8 @@ func (_m *StudioReaderWriter) FindChildren(id int) ([]*models.Studio, error) { } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(id) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) } else { r1 = ret.Error(1) } @@ -199,13 +201,13 @@ func (_m *StudioReaderWriter) FindChildren(id int) ([]*models.Studio, error) { return r0, r1 } -// FindMany provides a mock function with given fields: ids -func (_m *StudioReaderWriter) FindMany(ids []int) ([]*models.Studio, error) { - ret := _m.Called(ids) +// FindMany provides a mock function with given fields: ctx, ids +func (_m *StudioReaderWriter) FindMany(ctx context.Context, ids []int) ([]*models.Studio, error) { + ret := _m.Called(ctx, ids) var r0 []*models.Studio - if rf, ok := ret.Get(0).(func([]int) []*models.Studio); ok { - r0 = rf(ids) + if rf, ok := ret.Get(0).(func(context.Context, []int) []*models.Studio); ok { + r0 = rf(ctx, ids) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Studio) @@ -213,8 +215,8 @@ func (_m *StudioReaderWriter) FindMany(ids []int) ([]*models.Studio, error) { } var r1 error - if rf, ok := ret.Get(1).(func([]int) error); ok { - r1 = rf(ids) + if rf, ok := ret.Get(1).(func(context.Context, []int) error); ok { + r1 = rf(ctx, ids) } else { r1 = ret.Error(1) } @@ -222,13 +224,13 @@ func (_m *StudioReaderWriter) FindMany(ids []int) ([]*models.Studio, error) { return r0, r1 } -// GetAliases provides a mock function with given fields: studioID -func (_m *StudioReaderWriter) GetAliases(studioID int) ([]string, error) { - ret := _m.Called(studioID) +// GetAliases provides a mock function with given fields: ctx, studioID +func (_m *StudioReaderWriter) GetAliases(ctx context.Context, studioID int) ([]string, error) { + ret := _m.Called(ctx, studioID) var r0 []string - if rf, ok := ret.Get(0).(func(int) []string); ok { - r0 = rf(studioID) + if rf, ok := ret.Get(0).(func(context.Context, int) []string); ok { + r0 = rf(ctx, studioID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]string) @@ -236,8 +238,8 @@ func (_m *StudioReaderWriter) GetAliases(studioID int) ([]string, error) { } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(studioID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, studioID) } else { r1 = ret.Error(1) } @@ -245,13 +247,13 @@ func (_m *StudioReaderWriter) GetAliases(studioID int) ([]string, error) { return r0, r1 } -// GetImage provides a mock function with given fields: studioID -func (_m *StudioReaderWriter) GetImage(studioID int) ([]byte, error) { - ret := _m.Called(studioID) +// GetImage provides a mock function with given fields: ctx, studioID +func (_m *StudioReaderWriter) GetImage(ctx context.Context, studioID int) ([]byte, error) { + ret := _m.Called(ctx, studioID) var r0 []byte - if rf, ok := ret.Get(0).(func(int) []byte); ok { - r0 = rf(studioID) + if rf, ok := ret.Get(0).(func(context.Context, int) []byte); ok { + r0 = rf(ctx, studioID) } else { if 
ret.Get(0) != nil { r0 = ret.Get(0).([]byte) @@ -259,8 +261,8 @@ func (_m *StudioReaderWriter) GetImage(studioID int) ([]byte, error) { } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(studioID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, studioID) } else { r1 = ret.Error(1) } @@ -268,22 +270,22 @@ func (_m *StudioReaderWriter) GetImage(studioID int) ([]byte, error) { return r0, r1 } -// GetStashIDs provides a mock function with given fields: studioID -func (_m *StudioReaderWriter) GetStashIDs(studioID int) ([]*models.StashID, error) { - ret := _m.Called(studioID) +// GetStashIDs provides a mock function with given fields: ctx, relatedID +func (_m *StudioReaderWriter) GetStashIDs(ctx context.Context, relatedID int) ([]models.StashID, error) { + ret := _m.Called(ctx, relatedID) - var r0 []*models.StashID - if rf, ok := ret.Get(0).(func(int) []*models.StashID); ok { - r0 = rf(studioID) + var r0 []models.StashID + if rf, ok := ret.Get(0).(func(context.Context, int) []models.StashID); ok { + r0 = rf(ctx, relatedID) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*models.StashID) + r0 = ret.Get(0).([]models.StashID) } } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(studioID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, relatedID) } else { r1 = ret.Error(1) } @@ -291,20 +293,20 @@ func (_m *StudioReaderWriter) GetStashIDs(studioID int) ([]*models.StashID, erro return r0, r1 } -// HasImage provides a mock function with given fields: studioID -func (_m *StudioReaderWriter) HasImage(studioID int) (bool, error) { - ret := _m.Called(studioID) +// HasImage provides a mock function with given fields: ctx, studioID +func (_m *StudioReaderWriter) HasImage(ctx context.Context, studioID int) (bool, error) { + ret := _m.Called(ctx, studioID) var r0 bool - if rf, ok := ret.Get(0).(func(int) bool); ok { - r0 = rf(studioID) + if rf, ok := ret.Get(0).(func(context.Context, int) bool); ok { + r0 = rf(ctx, studioID) } else { r0 = ret.Get(0).(bool) } var r1 error - if rf, ok := ret.Get(1).(func(int) error); ok { - r1 = rf(studioID) + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, studioID) } else { r1 = ret.Error(1) } @@ -312,13 +314,13 @@ func (_m *StudioReaderWriter) HasImage(studioID int) (bool, error) { return r0, r1 } -// Query provides a mock function with given fields: studioFilter, findFilter -func (_m *StudioReaderWriter) Query(studioFilter *models.StudioFilterType, findFilter *models.FindFilterType) ([]*models.Studio, int, error) { - ret := _m.Called(studioFilter, findFilter) +// Query provides a mock function with given fields: ctx, studioFilter, findFilter +func (_m *StudioReaderWriter) Query(ctx context.Context, studioFilter *models.StudioFilterType, findFilter *models.FindFilterType) ([]*models.Studio, int, error) { + ret := _m.Called(ctx, studioFilter, findFilter) var r0 []*models.Studio - if rf, ok := ret.Get(0).(func(*models.StudioFilterType, *models.FindFilterType) []*models.Studio); ok { - r0 = rf(studioFilter, findFilter) + if rf, ok := ret.Get(0).(func(context.Context, *models.StudioFilterType, *models.FindFilterType) []*models.Studio); ok { + r0 = rf(ctx, studioFilter, findFilter) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Studio) @@ -326,15 +328,15 @@ func (_m *StudioReaderWriter) Query(studioFilter *models.StudioFilterType, findF } var r1 int - if rf, ok := ret.Get(1).(func(*models.StudioFilterType, 
*models.FindFilterType) int); ok { - r1 = rf(studioFilter, findFilter) + if rf, ok := ret.Get(1).(func(context.Context, *models.StudioFilterType, *models.FindFilterType) int); ok { + r1 = rf(ctx, studioFilter, findFilter) } else { r1 = ret.Get(1).(int) } var r2 error - if rf, ok := ret.Get(2).(func(*models.StudioFilterType, *models.FindFilterType) error); ok { - r2 = rf(studioFilter, findFilter) + if rf, ok := ret.Get(2).(func(context.Context, *models.StudioFilterType, *models.FindFilterType) error); ok { + r2 = rf(ctx, studioFilter, findFilter) } else { r2 = ret.Error(2) } @@ -342,13 +344,13 @@ func (_m *StudioReaderWriter) Query(studioFilter *models.StudioFilterType, findF return r0, r1, r2 } -// QueryForAutoTag provides a mock function with given fields: words -func (_m *StudioReaderWriter) QueryForAutoTag(words []string) ([]*models.Studio, error) { - ret := _m.Called(words) +// QueryForAutoTag provides a mock function with given fields: ctx, words +func (_m *StudioReaderWriter) QueryForAutoTag(ctx context.Context, words []string) ([]*models.Studio, error) { + ret := _m.Called(ctx, words) var r0 []*models.Studio - if rf, ok := ret.Get(0).(func([]string) []*models.Studio); ok { - r0 = rf(words) + if rf, ok := ret.Get(0).(func(context.Context, []string) []*models.Studio); ok { + r0 = rf(ctx, words) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*models.Studio) @@ -356,8 +358,8 @@ func (_m *StudioReaderWriter) QueryForAutoTag(words []string) ([]*models.Studio, } var r1 error - if rf, ok := ret.Get(1).(func([]string) error); ok { - r1 = rf(words) + if rf, ok := ret.Get(1).(func(context.Context, []string) error); ok { + r1 = rf(ctx, words) } else { r1 = ret.Error(1) } @@ -365,13 +367,13 @@ func (_m *StudioReaderWriter) QueryForAutoTag(words []string) ([]*models.Studio, return r0, r1 } -// Update provides a mock function with given fields: updatedStudio -func (_m *StudioReaderWriter) Update(updatedStudio models.StudioPartial) (*models.Studio, error) { - ret := _m.Called(updatedStudio) +// Update provides a mock function with given fields: ctx, updatedStudio +func (_m *StudioReaderWriter) Update(ctx context.Context, updatedStudio models.StudioPartial) (*models.Studio, error) { + ret := _m.Called(ctx, updatedStudio) var r0 *models.Studio - if rf, ok := ret.Get(0).(func(models.StudioPartial) *models.Studio); ok { - r0 = rf(updatedStudio) + if rf, ok := ret.Get(0).(func(context.Context, models.StudioPartial) *models.Studio); ok { + r0 = rf(ctx, updatedStudio) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.Studio) @@ -379,8 +381,8 @@ func (_m *StudioReaderWriter) Update(updatedStudio models.StudioPartial) (*model } var r1 error - if rf, ok := ret.Get(1).(func(models.StudioPartial) error); ok { - r1 = rf(updatedStudio) + if rf, ok := ret.Get(1).(func(context.Context, models.StudioPartial) error); ok { + r1 = rf(ctx, updatedStudio) } else { r1 = ret.Error(1) } @@ -388,13 +390,13 @@ func (_m *StudioReaderWriter) Update(updatedStudio models.StudioPartial) (*model return r0, r1 } -// UpdateAliases provides a mock function with given fields: studioID, aliases -func (_m *StudioReaderWriter) UpdateAliases(studioID int, aliases []string) error { - ret := _m.Called(studioID, aliases) +// UpdateAliases provides a mock function with given fields: ctx, studioID, aliases +func (_m *StudioReaderWriter) UpdateAliases(ctx context.Context, studioID int, aliases []string) error { + ret := _m.Called(ctx, studioID, aliases) var r0 error - if rf, ok := ret.Get(0).(func(int, []string) error); ok { - r0 = 
rf(studioID, aliases) + if rf, ok := ret.Get(0).(func(context.Context, int, []string) error); ok { + r0 = rf(ctx, studioID, aliases) } else { r0 = ret.Error(0) } @@ -402,13 +404,13 @@ func (_m *StudioReaderWriter) UpdateAliases(studioID int, aliases []string) erro return r0 } -// UpdateFull provides a mock function with given fields: updatedStudio -func (_m *StudioReaderWriter) UpdateFull(updatedStudio models.Studio) (*models.Studio, error) { - ret := _m.Called(updatedStudio) +// UpdateFull provides a mock function with given fields: ctx, updatedStudio +func (_m *StudioReaderWriter) UpdateFull(ctx context.Context, updatedStudio models.Studio) (*models.Studio, error) { + ret := _m.Called(ctx, updatedStudio) var r0 *models.Studio - if rf, ok := ret.Get(0).(func(models.Studio) *models.Studio); ok { - r0 = rf(updatedStudio) + if rf, ok := ret.Get(0).(func(context.Context, models.Studio) *models.Studio); ok { + r0 = rf(ctx, updatedStudio) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*models.Studio) @@ -416,8 +418,8 @@ func (_m *StudioReaderWriter) UpdateFull(updatedStudio models.Studio) (*models.S } var r1 error - if rf, ok := ret.Get(1).(func(models.Studio) error); ok { - r1 = rf(updatedStudio) + if rf, ok := ret.Get(1).(func(context.Context, models.Studio) error); ok { + r1 = rf(ctx, updatedStudio) } else { r1 = ret.Error(1) } @@ -425,13 +427,13 @@ func (_m *StudioReaderWriter) UpdateFull(updatedStudio models.Studio) (*models.S return r0, r1 } -// UpdateImage provides a mock function with given fields: studioID, image -func (_m *StudioReaderWriter) UpdateImage(studioID int, image []byte) error { - ret := _m.Called(studioID, image) +// UpdateImage provides a mock function with given fields: ctx, studioID, image +func (_m *StudioReaderWriter) UpdateImage(ctx context.Context, studioID int, image []byte) error { + ret := _m.Called(ctx, studioID, image) var r0 error - if rf, ok := ret.Get(0).(func(int, []byte) error); ok { - r0 = rf(studioID, image) + if rf, ok := ret.Get(0).(func(context.Context, int, []byte) error); ok { + r0 = rf(ctx, studioID, image) } else { r0 = ret.Error(0) } @@ -439,13 +441,13 @@ func (_m *StudioReaderWriter) UpdateImage(studioID int, image []byte) error { return r0 } -// UpdateStashIDs provides a mock function with given fields: studioID, stashIDs -func (_m *StudioReaderWriter) UpdateStashIDs(studioID int, stashIDs []models.StashID) error { - ret := _m.Called(studioID, stashIDs) +// UpdateStashIDs provides a mock function with given fields: ctx, studioID, stashIDs +func (_m *StudioReaderWriter) UpdateStashIDs(ctx context.Context, studioID int, stashIDs []models.StashID) error { + ret := _m.Called(ctx, studioID, stashIDs) var r0 error - if rf, ok := ret.Get(0).(func(int, []models.StashID) error); ok { - r0 = rf(studioID, stashIDs) + if rf, ok := ret.Get(0).(func(context.Context, int, []models.StashID) error); ok { + r0 = rf(ctx, studioID, stashIDs) } else { r0 = ret.Error(0) } diff --git a/pkg/models/mocks/TagReaderWriter.go b/pkg/models/mocks/TagReaderWriter.go index 64a8088a6..1a53adf05 100644 --- a/pkg/models/mocks/TagReaderWriter.go +++ b/pkg/models/mocks/TagReaderWriter.go @@ -3,6 +3,8 @@ package mocks import ( + context "context" + models "github.com/stashapp/stash/pkg/models" mock "github.com/stretchr/testify/mock" ) @@ -12,13 +14,13 @@ type TagReaderWriter struct { mock.Mock } -// All provides a mock function with given fields: -func (_m *TagReaderWriter) All() ([]*models.Tag, error) { - ret := _m.Called() +// All provides a mock function with given fields: ctx 
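// (Illustrative aside, not part of the generated file:) every method in
// these regenerated mocks follows the shape shown here. _m.Called(ctx, ...)
// records the call, then the typed assertion lets a test substitute a
// function with the new context-aware signature for the recorded value:
//
//	m.On("All", mock.Anything).Return(
//		func(ctx context.Context) []*models.Tag { return fixtureTags },
//		nil,
//	)
//
// fixtureTags is a hypothetical fixture; non-function return values skip
// the rf branch and are handed back exactly as recorded.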
diff --git a/pkg/models/mocks/TagReaderWriter.go b/pkg/models/mocks/TagReaderWriter.go
index 64a8088a6..1a53adf05 100644
--- a/pkg/models/mocks/TagReaderWriter.go
+++ b/pkg/models/mocks/TagReaderWriter.go
@@ -3,6 +3,8 @@
 package mocks
 
 import (
+	context "context"
+
 	models "github.com/stashapp/stash/pkg/models"
 	mock "github.com/stretchr/testify/mock"
 )
@@ -12,13 +14,13 @@ type TagReaderWriter struct {
 	mock.Mock
 }
 
-// All provides a mock function with given fields:
-func (_m *TagReaderWriter) All() ([]*models.Tag, error) {
-	ret := _m.Called()
+// All provides a mock function with given fields: ctx
+func (_m *TagReaderWriter) All(ctx context.Context) ([]*models.Tag, error) {
+	ret := _m.Called(ctx)
 
 	var r0 []*models.Tag
-	if rf, ok := ret.Get(0).(func() []*models.Tag); ok {
-		r0 = rf()
+	if rf, ok := ret.Get(0).(func(context.Context) []*models.Tag); ok {
+		r0 = rf(ctx)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).([]*models.Tag)
@@ -26,8 +28,8 @@ func (_m *TagReaderWriter) All() ([]*models.Tag, error) {
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func() error); ok {
-		r1 = rf()
+	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+		r1 = rf(ctx)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -35,20 +37,20 @@ func (_m *TagReaderWriter) All() ([]*models.Tag, error) {
 	return r0, r1
 }
 
-// Count provides a mock function with given fields:
-func (_m *TagReaderWriter) Count() (int, error) {
-	ret := _m.Called()
+// Count provides a mock function with given fields: ctx
+func (_m *TagReaderWriter) Count(ctx context.Context) (int, error) {
+	ret := _m.Called(ctx)
 
 	var r0 int
-	if rf, ok := ret.Get(0).(func() int); ok {
-		r0 = rf()
+	if rf, ok := ret.Get(0).(func(context.Context) int); ok {
+		r0 = rf(ctx)
 	} else {
 		r0 = ret.Get(0).(int)
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func() error); ok {
-		r1 = rf()
+	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+		r1 = rf(ctx)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -56,13 +58,13 @@ func (_m *TagReaderWriter) Count() (int, error) {
 	return r0, r1
 }
 
-// Create provides a mock function with given fields: newTag
-func (_m *TagReaderWriter) Create(newTag models.Tag) (*models.Tag, error) {
-	ret := _m.Called(newTag)
+// Create provides a mock function with given fields: ctx, newTag
+func (_m *TagReaderWriter) Create(ctx context.Context, newTag models.Tag) (*models.Tag, error) {
+	ret := _m.Called(ctx, newTag)
 
 	var r0 *models.Tag
-	if rf, ok := ret.Get(0).(func(models.Tag) *models.Tag); ok {
-		r0 = rf(newTag)
+	if rf, ok := ret.Get(0).(func(context.Context, models.Tag) *models.Tag); ok {
+		r0 = rf(ctx, newTag)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*models.Tag)
@@ -70,8 +72,8 @@ func (_m *TagReaderWriter) Create(newTag models.Tag) (*models.Tag, error) {
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(models.Tag) error); ok {
-		r1 = rf(newTag)
+	if rf, ok := ret.Get(1).(func(context.Context, models.Tag) error); ok {
+		r1 = rf(ctx, newTag)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -79,13 +81,13 @@ func (_m *TagReaderWriter) Create(newTag models.Tag) (*models.Tag, error) {
 	return r0, r1
 }
 
-// Destroy provides a mock function with given fields: id
-func (_m *TagReaderWriter) Destroy(id int) error {
-	ret := _m.Called(id)
+// Destroy provides a mock function with given fields: ctx, id
+func (_m *TagReaderWriter) Destroy(ctx context.Context, id int) error {
+	ret := _m.Called(ctx, id)
 
 	var r0 error
-	if rf, ok := ret.Get(0).(func(int) error); ok {
-		r0 = rf(id)
+	if rf, ok := ret.Get(0).(func(context.Context, int) error); ok {
+		r0 = rf(ctx, id)
 	} else {
 		r0 = ret.Error(0)
 	}
@@ -93,13 +95,13 @@ func (_m *TagReaderWriter) Destroy(id int) error {
 	return r0
 }
 
-// DestroyImage provides a mock function with given fields: tagID
-func (_m *TagReaderWriter) DestroyImage(tagID int) error {
-	ret := _m.Called(tagID)
+// DestroyImage provides a mock function with given fields: ctx, tagID
+func (_m *TagReaderWriter) DestroyImage(ctx context.Context, tagID int) error {
+	ret := _m.Called(ctx, tagID)
 
 	var r0 error
-	if rf, ok := ret.Get(0).(func(int) error); ok {
-		r0 = rf(tagID)
+	if rf, ok := ret.Get(0).(func(context.Context, int) error); ok {
+		r0 = rf(ctx, tagID)
 	} else {
 		r0 = ret.Error(0)
 	}
@@ -107,13 +109,13 @@ func (_m *TagReaderWriter) DestroyImage(tagID int) error {
 	return r0
 }
 
-// Find provides a mock function with given fields: id
-func (_m *TagReaderWriter) Find(id int) (*models.Tag, error) {
-	ret := _m.Called(id)
+// Find provides a mock function with given fields: ctx, id
+func (_m *TagReaderWriter) Find(ctx context.Context, id int) (*models.Tag, error) {
+	ret := _m.Called(ctx, id)
 
 	var r0 *models.Tag
-	if rf, ok := ret.Get(0).(func(int) *models.Tag); ok {
-		r0 = rf(id)
+	if rf, ok := ret.Get(0).(func(context.Context, int) *models.Tag); ok {
+		r0 = rf(ctx, id)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*models.Tag)
@@ -121,8 +123,8 @@ func (_m *TagReaderWriter) Find(id int) (*models.Tag, error) {
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(int) error); ok {
-		r1 = rf(id)
+	if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
+		r1 = rf(ctx, id)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -130,13 +132,13 @@ func (_m *TagReaderWriter) Find(id int) (*models.Tag, error) {
 	return r0, r1
 }
 
-// FindAllAncestors provides a mock function with given fields: tagID, excludeIDs
-func (_m *TagReaderWriter) FindAllAncestors(tagID int, excludeIDs []int) ([]*models.TagPath, error) {
-	ret := _m.Called(tagID, excludeIDs)
+// FindAllAncestors provides a mock function with given fields: ctx, tagID, excludeIDs
+func (_m *TagReaderWriter) FindAllAncestors(ctx context.Context, tagID int, excludeIDs []int) ([]*models.TagPath, error) {
+	ret := _m.Called(ctx, tagID, excludeIDs)
 
 	var r0 []*models.TagPath
-	if rf, ok := ret.Get(0).(func(int, []int) []*models.TagPath); ok {
-		r0 = rf(tagID, excludeIDs)
+	if rf, ok := ret.Get(0).(func(context.Context, int, []int) []*models.TagPath); ok {
+		r0 = rf(ctx, tagID, excludeIDs)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).([]*models.TagPath)
@@ -144,8 +146,8 @@ func (_m *TagReaderWriter) FindAllAncestors(tagID int, excludeIDs []int) ([]*mod
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(int, []int) error); ok {
-		r1 = rf(tagID, excludeIDs)
+	if rf, ok := ret.Get(1).(func(context.Context, int, []int) error); ok {
+		r1 = rf(ctx, tagID, excludeIDs)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -153,13 +155,13 @@ func (_m *TagReaderWriter) FindAllAncestors(tagID int, excludeIDs []int) ([]*mod
 	return r0, r1
 }
 
-// FindAllDescendants provides a mock function with given fields: tagID, excludeIDs
-func (_m *TagReaderWriter) FindAllDescendants(tagID int, excludeIDs []int) ([]*models.TagPath, error) {
-	ret := _m.Called(tagID, excludeIDs)
+// FindAllDescendants provides a mock function with given fields: ctx, tagID, excludeIDs
+func (_m *TagReaderWriter) FindAllDescendants(ctx context.Context, tagID int, excludeIDs []int) ([]*models.TagPath, error) {
+	ret := _m.Called(ctx, tagID, excludeIDs)
 
 	var r0 []*models.TagPath
-	if rf, ok := ret.Get(0).(func(int, []int) []*models.TagPath); ok {
-		r0 = rf(tagID, excludeIDs)
+	if rf, ok := ret.Get(0).(func(context.Context, int, []int) []*models.TagPath); ok {
+		r0 = rf(ctx, tagID, excludeIDs)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).([]*models.TagPath)
@@ -167,8 +169,8 @@ func (_m *TagReaderWriter) FindAllDescendants(tagID int, excludeIDs []int) ([]*m
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(int, []int) error); ok {
-		r1 = rf(tagID, excludeIDs)
+	if rf, ok := ret.Get(1).(func(context.Context, int, []int) error); ok {
+		r1 = rf(ctx, tagID, excludeIDs)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -176,13 +178,13 @@ func (_m *TagReaderWriter) FindAllDescendants(tagID int, excludeIDs []int) ([]*m
 	return r0, r1
 }
 
-// FindByChildTagID provides a mock function with given fields: childID
-func (_m *TagReaderWriter) FindByChildTagID(childID int) ([]*models.Tag, error) {
-	ret := _m.Called(childID)
+// FindByChildTagID provides a mock function with given fields: ctx, childID
+func (_m *TagReaderWriter) FindByChildTagID(ctx context.Context, childID int) ([]*models.Tag, error) {
+	ret := _m.Called(ctx, childID)
 
 	var r0 []*models.Tag
-	if rf, ok := ret.Get(0).(func(int) []*models.Tag); ok {
-		r0 = rf(childID)
+	if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Tag); ok {
+		r0 = rf(ctx, childID)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).([]*models.Tag)
@@ -190,8 +192,8 @@ func (_m *TagReaderWriter) FindByChildTagID(childID int) ([]*models.Tag, error)
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(int) error); ok {
-		r1 = rf(childID)
+	if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
+		r1 = rf(ctx, childID)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -199,13 +201,13 @@ func (_m *TagReaderWriter) FindByChildTagID(childID int) ([]*models.Tag, error)
 	return r0, r1
 }
 
-// FindByGalleryID provides a mock function with given fields: galleryID
-func (_m *TagReaderWriter) FindByGalleryID(galleryID int) ([]*models.Tag, error) {
-	ret := _m.Called(galleryID)
+// FindByGalleryID provides a mock function with given fields: ctx, galleryID
+func (_m *TagReaderWriter) FindByGalleryID(ctx context.Context, galleryID int) ([]*models.Tag, error) {
+	ret := _m.Called(ctx, galleryID)
 
 	var r0 []*models.Tag
-	if rf, ok := ret.Get(0).(func(int) []*models.Tag); ok {
-		r0 = rf(galleryID)
+	if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Tag); ok {
+		r0 = rf(ctx, galleryID)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).([]*models.Tag)
@@ -213,8 +215,8 @@ func (_m *TagReaderWriter) FindByGalleryID(galleryID int) ([]*models.Tag, error)
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(int) error); ok {
-		r1 = rf(galleryID)
+	if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
+		r1 = rf(ctx, galleryID)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -222,13 +224,13 @@ func (_m *TagReaderWriter) FindByGalleryID(galleryID int) ([]*models.Tag, error)
 	return r0, r1
 }
 
-// FindByImageID provides a mock function with given fields: imageID
-func (_m *TagReaderWriter) FindByImageID(imageID int) ([]*models.Tag, error) {
-	ret := _m.Called(imageID)
+// FindByImageID provides a mock function with given fields: ctx, imageID
+func (_m *TagReaderWriter) FindByImageID(ctx context.Context, imageID int) ([]*models.Tag, error) {
+	ret := _m.Called(ctx, imageID)
 
 	var r0 []*models.Tag
-	if rf, ok := ret.Get(0).(func(int) []*models.Tag); ok {
-		r0 = rf(imageID)
+	if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Tag); ok {
+		r0 = rf(ctx, imageID)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).([]*models.Tag)
@@ -236,8 +238,8 @@ func (_m *TagReaderWriter) FindByImageID(imageID int) ([]*models.Tag, error) {
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(int) error); ok {
-		r1 = rf(imageID)
+	if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
+		r1 = rf(ctx, imageID)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -245,13 +247,13 @@ func (_m *TagReaderWriter) FindByImageID(imageID int) ([]*models.Tag, error) {
 	return r0, r1
 }
 
-// FindByName provides a mock function with given fields: name, nocase
-func (_m *TagReaderWriter) FindByName(name string, nocase bool) (*models.Tag, error) {
-	ret := _m.Called(name, nocase)
+// FindByName provides a mock function with given fields: ctx, name, nocase
+func (_m *TagReaderWriter) FindByName(ctx context.Context, name string, nocase bool) (*models.Tag, error) {
+	ret := _m.Called(ctx, name, nocase)
 
 	var r0 *models.Tag
-	if rf, ok := ret.Get(0).(func(string, bool) *models.Tag); ok {
-		r0 = rf(name, nocase)
+	if rf, ok := ret.Get(0).(func(context.Context, string, bool) *models.Tag); ok {
+		r0 = rf(ctx, name, nocase)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*models.Tag)
@@ -259,8 +261,8 @@ func (_m *TagReaderWriter) FindByName(name string, nocase bool) (*models.Tag, er
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(string, bool) error); ok {
-		r1 = rf(name, nocase)
+	if rf, ok := ret.Get(1).(func(context.Context, string, bool) error); ok {
+		r1 = rf(ctx, name, nocase)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -268,13 +270,13 @@ func (_m *TagReaderWriter) FindByName(name string, nocase bool) (*models.Tag, er
 	return r0, r1
 }
 
-// FindByNames provides a mock function with given fields: names, nocase
-func (_m *TagReaderWriter) FindByNames(names []string, nocase bool) ([]*models.Tag, error) {
-	ret := _m.Called(names, nocase)
+// FindByNames provides a mock function with given fields: ctx, names, nocase
+func (_m *TagReaderWriter) FindByNames(ctx context.Context, names []string, nocase bool) ([]*models.Tag, error) {
+	ret := _m.Called(ctx, names, nocase)
 
 	var r0 []*models.Tag
-	if rf, ok := ret.Get(0).(func([]string, bool) []*models.Tag); ok {
-		r0 = rf(names, nocase)
+	if rf, ok := ret.Get(0).(func(context.Context, []string, bool) []*models.Tag); ok {
+		r0 = rf(ctx, names, nocase)
	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).([]*models.Tag)
@@ -282,8 +284,8 @@ func (_m *TagReaderWriter) FindByNames(names []string, nocase bool) ([]*models.T
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func([]string, bool) error); ok {
-		r1 = rf(names, nocase)
+	if rf, ok := ret.Get(1).(func(context.Context, []string, bool) error); ok {
+		r1 = rf(ctx, names, nocase)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -291,13 +293,13 @@ func (_m *TagReaderWriter) FindByNames(names []string, nocase bool) ([]*models.T
 	return r0, r1
 }
 
-// FindByParentTagID provides a mock function with given fields: parentID
-func (_m *TagReaderWriter) FindByParentTagID(parentID int) ([]*models.Tag, error) {
-	ret := _m.Called(parentID)
+// FindByParentTagID provides a mock function with given fields: ctx, parentID
+func (_m *TagReaderWriter) FindByParentTagID(ctx context.Context, parentID int) ([]*models.Tag, error) {
+	ret := _m.Called(ctx, parentID)
 
 	var r0 []*models.Tag
-	if rf, ok := ret.Get(0).(func(int) []*models.Tag); ok {
-		r0 = rf(parentID)
+	if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Tag); ok {
+		r0 = rf(ctx, parentID)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).([]*models.Tag)
@@ -305,8 +307,8 @@ func (_m *TagReaderWriter) FindByParentTagID(parentID int) ([]*models.Tag, error
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(int) error); ok {
-		r1 = rf(parentID)
+	if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
+		r1 = rf(ctx, parentID)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -314,13 +316,13 @@ func (_m *TagReaderWriter) FindByParentTagID(parentID int) ([]*models.Tag, error
 	return r0, r1
 }
 
-// FindByPerformerID provides a mock function with given fields: performerID
-func (_m *TagReaderWriter) FindByPerformerID(performerID int) ([]*models.Tag, error) {
-	ret := _m.Called(performerID)
+// FindByPerformerID provides a mock function with given fields: ctx, performerID
+func (_m *TagReaderWriter) FindByPerformerID(ctx context.Context, performerID int) ([]*models.Tag, error) {
+	ret := _m.Called(ctx, performerID)
 
 	var r0 []*models.Tag
-	if rf, ok := ret.Get(0).(func(int) []*models.Tag); ok {
-		r0 = rf(performerID)
+	if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Tag); ok {
+		r0 = rf(ctx, performerID)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).([]*models.Tag)
@@ -328,8 +330,8 @@ func (_m *TagReaderWriter) FindByPerformerID(performerID int) ([]*models.Tag, er
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(int) error); ok {
-		r1 = rf(performerID)
+	if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
+		r1 = rf(ctx, performerID)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -337,13 +339,13 @@ func (_m *TagReaderWriter) FindByPerformerID(performerID int) ([]*models.Tag, er
 	return r0, r1
 }
 
-// FindBySceneID provides a mock function with given fields: sceneID
-func (_m *TagReaderWriter) FindBySceneID(sceneID int) ([]*models.Tag, error) {
-	ret := _m.Called(sceneID)
+// FindBySceneID provides a mock function with given fields: ctx, sceneID
+func (_m *TagReaderWriter) FindBySceneID(ctx context.Context, sceneID int) ([]*models.Tag, error) {
+	ret := _m.Called(ctx, sceneID)
 
 	var r0 []*models.Tag
-	if rf, ok := ret.Get(0).(func(int) []*models.Tag); ok {
-		r0 = rf(sceneID)
+	if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Tag); ok {
+		r0 = rf(ctx, sceneID)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).([]*models.Tag)
@@ -351,8 +353,8 @@ func (_m *TagReaderWriter) FindBySceneID(sceneID int) ([]*models.Tag, error) {
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(int) error); ok {
-		r1 = rf(sceneID)
+	if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
+		r1 = rf(ctx, sceneID)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -360,13 +362,13 @@ func (_m *TagReaderWriter) FindBySceneID(sceneID int) ([]*models.Tag, error) {
 	return r0, r1
 }
 
-// FindBySceneMarkerID provides a mock function with given fields: sceneMarkerID
-func (_m *TagReaderWriter) FindBySceneMarkerID(sceneMarkerID int) ([]*models.Tag, error) {
-	ret := _m.Called(sceneMarkerID)
+// FindBySceneMarkerID provides a mock function with given fields: ctx, sceneMarkerID
+func (_m *TagReaderWriter) FindBySceneMarkerID(ctx context.Context, sceneMarkerID int) ([]*models.Tag, error) {
+	ret := _m.Called(ctx, sceneMarkerID)
 
 	var r0 []*models.Tag
-	if rf, ok := ret.Get(0).(func(int) []*models.Tag); ok {
-		r0 = rf(sceneMarkerID)
+	if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Tag); ok {
+		r0 = rf(ctx, sceneMarkerID)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).([]*models.Tag)
@@ -374,8 +376,8 @@ func (_m *TagReaderWriter) FindBySceneMarkerID(sceneMarkerID int) ([]*models.Tag
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(int) error); ok {
-		r1 = rf(sceneMarkerID)
+	if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
+		r1 = rf(ctx, sceneMarkerID)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -383,13 +385,13 @@ func (_m *TagReaderWriter) FindBySceneMarkerID(sceneMarkerID int) ([]*models.Tag
 	return r0, r1
 }
 
-// FindMany provides a mock function with given fields: ids
-func (_m *TagReaderWriter) FindMany(ids []int) ([]*models.Tag, error) {
-	ret := _m.Called(ids)
+// FindMany provides a mock function with given fields: ctx, ids
+func (_m *TagReaderWriter) FindMany(ctx context.Context, ids []int) ([]*models.Tag, error) {
+	ret := _m.Called(ctx, ids)
 
 	var r0 []*models.Tag
-	if rf, ok := ret.Get(0).(func([]int) []*models.Tag); ok {
-		r0 = rf(ids)
+	if rf, ok := ret.Get(0).(func(context.Context, []int) []*models.Tag); ok {
+		r0 = rf(ctx, ids)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).([]*models.Tag)
@@ -397,8 +399,8 @@ func (_m *TagReaderWriter) FindMany(ids []int) ([]*models.Tag, error) {
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func([]int) error); ok {
-		r1 = rf(ids)
+	if rf, ok := ret.Get(1).(func(context.Context, []int) error); ok {
+		r1 = rf(ctx, ids)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -406,13 +408,13 @@ func (_m *TagReaderWriter) FindMany(ids []int) ([]*models.Tag, error) {
 	return r0, r1
 }
 
-// GetAliases provides a mock function with given fields: tagID
-func (_m *TagReaderWriter) GetAliases(tagID int) ([]string, error) {
-	ret := _m.Called(tagID)
+// GetAliases provides a mock function with given fields: ctx, tagID
+func (_m *TagReaderWriter) GetAliases(ctx context.Context, tagID int) ([]string, error) {
+	ret := _m.Called(ctx, tagID)
 
 	var r0 []string
-	if rf, ok := ret.Get(0).(func(int) []string); ok {
-		r0 = rf(tagID)
+	if rf, ok := ret.Get(0).(func(context.Context, int) []string); ok {
+		r0 = rf(ctx, tagID)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).([]string)
@@ -420,8 +422,8 @@ func (_m *TagReaderWriter) GetAliases(tagID int) ([]string, error) {
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(int) error); ok {
-		r1 = rf(tagID)
+	if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
+		r1 = rf(ctx, tagID)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -429,13 +431,13 @@ func (_m *TagReaderWriter) GetAliases(tagID int) ([]string, error) {
 	return r0, r1
 }
 
-// GetImage provides a mock function with given fields: tagID
-func (_m *TagReaderWriter) GetImage(tagID int) ([]byte, error) {
-	ret := _m.Called(tagID)
+// GetImage provides a mock function with given fields: ctx, tagID
+func (_m *TagReaderWriter) GetImage(ctx context.Context, tagID int) ([]byte, error) {
+	ret := _m.Called(ctx, tagID)
 
 	var r0 []byte
-	if rf, ok := ret.Get(0).(func(int) []byte); ok {
-		r0 = rf(tagID)
+	if rf, ok := ret.Get(0).(func(context.Context, int) []byte); ok {
+		r0 = rf(ctx, tagID)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).([]byte)
@@ -443,8 +445,8 @@ func (_m *TagReaderWriter) GetImage(tagID int) ([]byte, error) {
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(int) error); ok {
-		r1 = rf(tagID)
+	if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
+		r1 = rf(ctx, tagID)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -452,13 +454,13 @@ func (_m *TagReaderWriter) GetImage(tagID int) ([]byte, error) {
 	return r0, r1
 }
 
-// Merge provides a mock function with given fields: source, destination
-func (_m *TagReaderWriter) Merge(source []int, destination int) error {
-	ret := _m.Called(source, destination)
+// Merge provides a mock function with given fields: ctx, source, destination
+func (_m *TagReaderWriter) Merge(ctx context.Context, source []int, destination int) error {
+	ret := _m.Called(ctx, source, destination)
 
 	var r0 error
-	if rf, ok := ret.Get(0).(func([]int, int) error); ok {
-		r0 = rf(source, destination)
+	if rf, ok := ret.Get(0).(func(context.Context, []int, int) error); ok {
+		r0 = rf(ctx, source, destination)
 	} else {
 		r0 = ret.Error(0)
 	}
@@ -466,13 +468,13 @@ func (_m *TagReaderWriter) Merge(source []int, destination int) error {
 	return r0
 }
 
-// Query provides a mock function with given fields: tagFilter, findFilter
-func (_m *TagReaderWriter) Query(tagFilter *models.TagFilterType, findFilter *models.FindFilterType) ([]*models.Tag, int, error) {
-	ret := _m.Called(tagFilter, findFilter)
+// Query provides a mock function with given fields: ctx, tagFilter, findFilter
+func (_m *TagReaderWriter) Query(ctx context.Context, tagFilter *models.TagFilterType, findFilter *models.FindFilterType) ([]*models.Tag, int, error) {
+	ret := _m.Called(ctx, tagFilter, findFilter)
 
 	var r0 []*models.Tag
-	if rf, ok := ret.Get(0).(func(*models.TagFilterType, *models.FindFilterType) []*models.Tag); ok {
-		r0 = rf(tagFilter, findFilter)
+	if rf, ok := ret.Get(0).(func(context.Context, *models.TagFilterType, *models.FindFilterType) []*models.Tag); ok {
+		r0 = rf(ctx, tagFilter, findFilter)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).([]*models.Tag)
@@ -480,15 +482,15 @@ func (_m *TagReaderWriter) Query(tagFilter *models.TagFilterType, findFilter *mo
 	}
 
 	var r1 int
-	if rf, ok := ret.Get(1).(func(*models.TagFilterType, *models.FindFilterType) int); ok {
-		r1 = rf(tagFilter, findFilter)
+	if rf, ok := ret.Get(1).(func(context.Context, *models.TagFilterType, *models.FindFilterType) int); ok {
+		r1 = rf(ctx, tagFilter, findFilter)
 	} else {
 		r1 = ret.Get(1).(int)
 	}
 
 	var r2 error
-	if rf, ok := ret.Get(2).(func(*models.TagFilterType, *models.FindFilterType) error); ok {
-		r2 = rf(tagFilter, findFilter)
+	if rf, ok := ret.Get(2).(func(context.Context, *models.TagFilterType, *models.FindFilterType) error); ok {
+		r2 = rf(ctx, tagFilter, findFilter)
 	} else {
 		r2 = ret.Error(2)
 	}
@@ -496,13 +498,13 @@ func (_m *TagReaderWriter) Query(tagFilter *models.TagFilterType, findFilter *mo
 	return r0, r1, r2
 }
 
-// QueryForAutoTag provides a mock function with given fields: words
-func (_m *TagReaderWriter) QueryForAutoTag(words []string) ([]*models.Tag, error) {
-	ret := _m.Called(words)
+// QueryForAutoTag provides a mock function with given fields: ctx, words
+func (_m *TagReaderWriter) QueryForAutoTag(ctx context.Context, words []string) ([]*models.Tag, error) {
+	ret := _m.Called(ctx, words)
 
 	var r0 []*models.Tag
-	if rf, ok := ret.Get(0).(func([]string) []*models.Tag); ok {
-		r0 = rf(words)
+	if rf, ok := ret.Get(0).(func(context.Context, []string) []*models.Tag); ok {
+		r0 = rf(ctx, words)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).([]*models.Tag)
@@ -510,8 +512,8 @@ func (_m *TagReaderWriter) QueryForAutoTag(words []string) ([]*models.Tag, error
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func([]string) error); ok {
-		r1 = rf(words)
+	if rf, ok := ret.Get(1).(func(context.Context, []string) error); ok {
+		r1 = rf(ctx, words)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -519,13 +521,13 @@ func (_m *TagReaderWriter) QueryForAutoTag(words []string) ([]*models.Tag, error
 	return r0, r1
 }
 
-// Update provides a mock function with given fields: updateTag
-func (_m *TagReaderWriter) Update(updateTag models.TagPartial) (*models.Tag, error) {
-	ret := _m.Called(updateTag)
+// Update provides a mock function with given fields: ctx, updateTag
+func (_m *TagReaderWriter) Update(ctx context.Context, updateTag models.TagPartial) (*models.Tag, error) {
+	ret := _m.Called(ctx, updateTag)
 
 	var r0 *models.Tag
-	if rf, ok := ret.Get(0).(func(models.TagPartial) *models.Tag); ok {
-		r0 = rf(updateTag)
+	if rf, ok := ret.Get(0).(func(context.Context, models.TagPartial) *models.Tag); ok {
+		r0 = rf(ctx, updateTag)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*models.Tag)
@@ -533,8 +535,8 @@ func (_m *TagReaderWriter) Update(updateTag models.TagPartial) (*models.Tag, err
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(models.TagPartial) error); ok {
-		r1 = rf(updateTag)
+	if rf, ok := ret.Get(1).(func(context.Context, models.TagPartial) error); ok {
+		r1 = rf(ctx, updateTag)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -542,13 +544,13 @@ func (_m *TagReaderWriter) Update(updateTag models.TagPartial) (*models.Tag, err
 	return r0, r1
 }
 
-// UpdateAliases provides a mock function with given fields: tagID, aliases
-func (_m *TagReaderWriter) UpdateAliases(tagID int, aliases []string) error {
-	ret := _m.Called(tagID, aliases)
+// UpdateAliases provides a mock function with given fields: ctx, tagID, aliases
+func (_m *TagReaderWriter) UpdateAliases(ctx context.Context, tagID int, aliases []string) error {
+	ret := _m.Called(ctx, tagID, aliases)
 
 	var r0 error
-	if rf, ok := ret.Get(0).(func(int, []string) error); ok {
-		r0 = rf(tagID, aliases)
+	if rf, ok := ret.Get(0).(func(context.Context, int, []string) error); ok {
+		r0 = rf(ctx, tagID, aliases)
 	} else {
 		r0 = ret.Error(0)
 	}
@@ -556,13 +558,13 @@ func (_m *TagReaderWriter) UpdateAliases(tagID int, aliases []string) error {
 	return r0
 }
 
-// UpdateChildTags provides a mock function with given fields: tagID, parentIDs
-func (_m *TagReaderWriter) UpdateChildTags(tagID int, parentIDs []int) error {
-	ret := _m.Called(tagID, parentIDs)
+// UpdateChildTags provides a mock function with given fields: ctx, tagID, parentIDs
+func (_m *TagReaderWriter) UpdateChildTags(ctx context.Context, tagID int, parentIDs []int) error {
+	ret := _m.Called(ctx, tagID, parentIDs)
 
 	var r0 error
-	if rf, ok := ret.Get(0).(func(int, []int) error); ok {
-		r0 = rf(tagID, parentIDs)
+	if rf, ok := ret.Get(0).(func(context.Context, int, []int) error); ok {
+		r0 = rf(ctx, tagID, parentIDs)
 	} else {
 		r0 = ret.Error(0)
 	}
@@ -570,13 +572,13 @@ func (_m *TagReaderWriter) UpdateChildTags(tagID int, parentIDs []int) error {
 	return r0
 }
 
-// UpdateFull provides a mock function with given fields: updatedTag
-func (_m *TagReaderWriter) UpdateFull(updatedTag models.Tag) (*models.Tag, error) {
-	ret := _m.Called(updatedTag)
+// UpdateFull provides a mock function with given fields: ctx, updatedTag
+func (_m *TagReaderWriter) UpdateFull(ctx context.Context, updatedTag models.Tag) (*models.Tag, error) {
+	ret := _m.Called(ctx, updatedTag)
 
 	var r0 *models.Tag
-	if rf, ok := ret.Get(0).(func(models.Tag) *models.Tag); ok {
-		r0 = rf(updatedTag)
+	if rf, ok := ret.Get(0).(func(context.Context, models.Tag) *models.Tag); ok {
+		r0 = rf(ctx, updatedTag)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*models.Tag)
@@ -584,8 +586,8 @@ func (_m *TagReaderWriter) UpdateFull(updatedTag models.Tag) (*models.Tag, error
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(models.Tag) error); ok {
-		r1 = rf(updatedTag)
+	if rf, ok := ret.Get(1).(func(context.Context, models.Tag) error); ok {
+		r1 = rf(ctx, updatedTag)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -593,13 +595,13 @@ func (_m *TagReaderWriter) UpdateFull(updatedTag models.Tag) (*models.Tag, error
 	return r0, r1
 }
 
-// UpdateImage provides a mock function with given fields: tagID, image
-func (_m *TagReaderWriter) UpdateImage(tagID int, image []byte) error {
-	ret := _m.Called(tagID, image)
+// UpdateImage provides a mock function with given fields: ctx, tagID, image
+func (_m *TagReaderWriter) UpdateImage(ctx context.Context, tagID int, image []byte) error {
+	ret := _m.Called(ctx, tagID, image)
 
 	var r0 error
-	if rf, ok := ret.Get(0).(func(int, []byte) error); ok {
-		r0 = rf(tagID, image)
+	if rf, ok := ret.Get(0).(func(context.Context, int, []byte) error); ok {
+		r0 = rf(ctx, tagID, image)
 	} else {
 		r0 = ret.Error(0)
 	}
@@ -607,13 +609,13 @@ func (_m *TagReaderWriter) UpdateImage(tagID int, image []byte) error {
 	return r0
 }
 
-// UpdateParentTags provides a mock function with given fields: tagID, parentIDs
-func (_m *TagReaderWriter) UpdateParentTags(tagID int, parentIDs []int) error {
-	ret := _m.Called(tagID, parentIDs)
+// UpdateParentTags provides a mock function with given fields: ctx, tagID, parentIDs
+func (_m *TagReaderWriter) UpdateParentTags(ctx context.Context, tagID int, parentIDs []int) error {
+	ret := _m.Called(ctx, tagID, parentIDs)
 
 	var r0 error
-	if rf, ok := ret.Get(0).(func(int, []int) error); ok {
-		r0 = rf(tagID, parentIDs)
+	if rf, ok := ret.Get(0).(func(context.Context, int, []int) error); ok {
+		r0 = rf(ctx, tagID, parentIDs)
 	} else {
 		r0 = ret.Error(0)
 	}
diff --git a/pkg/models/mocks/query.go b/pkg/models/mocks/query.go
index 152335fc2..346bd1e55 100644
--- a/pkg/models/mocks/query.go
+++ b/pkg/models/mocks/query.go
@@ -1,16 +1,20 @@
 package mocks
 
-import "github.com/stashapp/stash/pkg/models"
+import (
+	context "context"
+
+	"github.com/stashapp/stash/pkg/models"
+)
 
 type sceneResolver struct {
 	scenes []*models.Scene
 }
 
-func (s *sceneResolver) Find(id int) (*models.Scene, error) {
+func (s *sceneResolver) Find(ctx context.Context, id int) (*models.Scene, error) {
 	panic("not implemented")
 }
 
-func (s *sceneResolver) FindMany(ids []int) ([]*models.Scene, error) {
+func (s *sceneResolver) FindMany(ctx context.Context, ids []int) ([]*models.Scene, error) {
 	return s.scenes, nil
 }
 
@@ -27,7 +31,7 @@ type imageResolver struct {
 	images []*models.Image
 }
 
-func (s *imageResolver) FindMany(ids []int) ([]*models.Image, error) {
+func (s *imageResolver) FindMany(ctx context.Context, ids []int) ([]*models.Image, error) {
 	return s.images, nil
 }
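The transaction mock that follows replaces the old stateful `TransactionManager` with a stateless no-op `TxnManager` plus a one-shot `NewTxnRepository` constructor. A rough sketch of how a test might use it, assuming `models.Repository` exposes the stores as interface-typed fields (as the constructor literal below suggests):

```go
package mocks_test

import (
	"context"
	"testing"

	"github.com/stashapp/stash/pkg/models"
	"github.com/stashapp/stash/pkg/models/mocks"
	"github.com/stretchr/testify/mock"
)

func TestWithNoopTxn(t *testing.T) {
	r := mocks.NewTxnRepository()

	// The concrete mock is still reachable for programming expectations.
	tagMock := r.Tag.(*mocks.TagReaderWriter)
	tagMock.On("Find", mock.Anything, 1).Return(&models.Tag{ID: 1}, nil)

	// Begin and Commit hand the context back unchanged, so code written
	// against the real transaction manager runs inline in unit tests.
	ctx, _ := r.TxnManager.Begin(context.Background())
	if _, err := r.Tag.Find(ctx, 1); err != nil {
		t.Fatal(err)
	}
	_ = r.TxnManager.Commit(ctx)
	tagMock.AssertExpectations(t)
}
```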
diff --git a/pkg/models/mocks/transaction.go b/pkg/models/mocks/transaction.go
index 886fef7d6..0690ae419 100644
--- a/pkg/models/mocks/transaction.go
+++ b/pkg/models/mocks/transaction.go
@@ -1,167 +1,56 @@
 package mocks
 
 import (
-	"context"
+	context "context"
 
-	models "github.com/stashapp/stash/pkg/models"
+	"github.com/stashapp/stash/pkg/models"
+	"github.com/stashapp/stash/pkg/txn"
 )
 
-type TransactionManager struct {
-	gallery     *GalleryReaderWriter
-	image       *ImageReaderWriter
-	movie       *MovieReaderWriter
-	performer   *PerformerReaderWriter
-	scene       *SceneReaderWriter
-	sceneMarker *SceneMarkerReaderWriter
-	scrapedItem *ScrapedItemReaderWriter
-	studio      *StudioReaderWriter
-	tag         *TagReaderWriter
-	savedFilter *SavedFilterReaderWriter
+type TxnManager struct{}
+
+func (*TxnManager) Begin(ctx context.Context) (context.Context, error) {
+	return ctx, nil
 }
 
-func NewTransactionManager() *TransactionManager {
-	return &TransactionManager{
-		gallery:     &GalleryReaderWriter{},
-		image:       &ImageReaderWriter{},
-		movie:       &MovieReaderWriter{},
-		performer:   &PerformerReaderWriter{},
-		scene:       &SceneReaderWriter{},
-		sceneMarker: &SceneMarkerReaderWriter{},
-		scrapedItem: &ScrapedItemReaderWriter{},
-		studio:      &StudioReaderWriter{},
-		tag:         &TagReaderWriter{},
-		savedFilter: &SavedFilterReaderWriter{},
+func (*TxnManager) WithDatabase(ctx context.Context) (context.Context, error) {
+	return ctx, nil
+}
+
+func (*TxnManager) Commit(ctx context.Context) error {
+	return nil
+}
+
+func (*TxnManager) Rollback(ctx context.Context) error {
+	return nil
+}
+
+func (*TxnManager) AddPostCommitHook(ctx context.Context, hook txn.TxnFunc) {
+}
+
+func (*TxnManager) AddPostRollbackHook(ctx context.Context, hook txn.TxnFunc) {
+}
+
+func (*TxnManager) IsLocked(err error) bool {
+	return false
+}
+
+func (*TxnManager) Reset() error {
+	return nil
+}
+
+func NewTxnRepository() models.Repository {
+	return models.Repository{
+		TxnManager:  &TxnManager{},
+		Gallery:     &GalleryReaderWriter{},
+		Image:       &ImageReaderWriter{},
+		Movie:       &MovieReaderWriter{},
+		Performer:   &PerformerReaderWriter{},
+		Scene:       &SceneReaderWriter{},
+		SceneMarker: &SceneMarkerReaderWriter{},
+		ScrapedItem: &ScrapedItemReaderWriter{},
+		Studio:      &StudioReaderWriter{},
+		Tag:         &TagReaderWriter{},
+		SavedFilter: &SavedFilterReaderWriter{},
 	}
 }
-
-func (t *TransactionManager) WithTxn(ctx context.Context, fn func(r models.Repository) error) error {
-	return fn(t)
-}
-
-func (t *TransactionManager) GalleryMock() *GalleryReaderWriter {
-	return t.gallery
-}
-
-func (t *TransactionManager) ImageMock() *ImageReaderWriter {
-	return t.image
-}
-
-func (t *TransactionManager) MovieMock() *MovieReaderWriter {
-	return t.movie
-}
-
-func (t *TransactionManager) PerformerMock() *PerformerReaderWriter {
-	return t.performer
-}
-
-func (t *TransactionManager) SceneMarkerMock() *SceneMarkerReaderWriter {
-	return t.sceneMarker
-}
-
-func (t *TransactionManager) SceneMock() *SceneReaderWriter {
-	return t.scene
-}
-
-func (t *TransactionManager) ScrapedItemMock() *ScrapedItemReaderWriter {
-	return t.scrapedItem
-}
-
-func (t *TransactionManager) StudioMock() *StudioReaderWriter {
-	return t.studio
-}
-
-func (t *TransactionManager) TagMock() *TagReaderWriter {
-	return t.tag
-}
-
-func (t *TransactionManager) SavedFilterMock() *SavedFilterReaderWriter {
-	return t.savedFilter
-}
-
-func (t *TransactionManager) Gallery() models.GalleryReaderWriter {
-	return t.GalleryMock()
-}
-
-func (t *TransactionManager) Image() models.ImageReaderWriter {
-	return t.ImageMock()
-}
-
-func (t *TransactionManager) Movie() models.MovieReaderWriter {
-	return t.MovieMock()
-}
-
-func (t *TransactionManager) Performer() models.PerformerReaderWriter {
-	return t.PerformerMock()
-}
-
-func (t *TransactionManager) SceneMarker() models.SceneMarkerReaderWriter {
-	return t.SceneMarkerMock()
-}
-
-func (t *TransactionManager) Scene() models.SceneReaderWriter {
-	return t.SceneMock()
-}
-
-func (t *TransactionManager) ScrapedItem() models.ScrapedItemReaderWriter {
-	return t.ScrapedItemMock()
-}
-
-func (t *TransactionManager) Studio() models.StudioReaderWriter {
-	return t.StudioMock()
-}
-
-func (t *TransactionManager) Tag() models.TagReaderWriter {
-	return t.TagMock()
-}
-
-func (t *TransactionManager) SavedFilter() models.SavedFilterReaderWriter {
-	return t.SavedFilterMock()
-}
-
-type ReadTransaction struct {
-	*TransactionManager
-}
-
-func (t *TransactionManager) WithReadTxn(ctx context.Context, fn func(r models.ReaderRepository) error) error {
-	return fn(&ReadTransaction{t})
-}
-
-func (r *ReadTransaction) Gallery() models.GalleryReader {
-	return r.GalleryMock()
-}
-
-func (r *ReadTransaction) Image() models.ImageReader {
-	return r.ImageMock()
-}
-
-func (r *ReadTransaction) Movie() models.MovieReader {
-	return r.MovieMock()
-}
-
-func (r *ReadTransaction) Performer() models.PerformerReader {
-	return r.PerformerMock()
-}
-
-func (r *ReadTransaction) SceneMarker() models.SceneMarkerReader {
-	return r.SceneMarkerMock()
-}
-
-func (r *ReadTransaction) Scene() models.SceneReader {
-	return r.SceneMock()
-}
-
-func (r *ReadTransaction) ScrapedItem() models.ScrapedItemReader {
-	return r.ScrapedItemMock()
-}
-
-func (r *ReadTransaction) Studio() models.StudioReader {
-	return r.StudioMock()
-}
-
-func (r *ReadTransaction) Tag() models.TagReader {
-	return r.TagMock()
-}
-
-func (r *ReadTransaction) SavedFilter() models.SavedFilterReader {
-	return r.SavedFilterMock()
-}
diff --git a/pkg/models/model_file.go b/pkg/models/model_file.go
index 21fd51bab..2a1d31900 100644
--- a/pkg/models/model_file.go
+++ b/pkg/models/model_file.go
@@ -1,6 +1,53 @@
 package models
 
-import "time"
+import (
+	"fmt"
+	"io"
+	"strconv"
+	"time"
+)
+
+type HashAlgorithm string
+
+const (
+	HashAlgorithmMd5 HashAlgorithm = "MD5"
+	// oshash
+	HashAlgorithmOshash HashAlgorithm = "OSHASH"
+)
+
+var AllHashAlgorithm = []HashAlgorithm{
+	HashAlgorithmMd5,
+	HashAlgorithmOshash,
+}
+
+func (e HashAlgorithm) IsValid() bool {
+	switch e {
+	case HashAlgorithmMd5, HashAlgorithmOshash:
+		return true
+	}
+	return false
+}
+
+func (e HashAlgorithm) String() string {
+	return string(e)
+}
+
+func (e *HashAlgorithm) UnmarshalGQL(v interface{}) error {
+	str, ok := v.(string)
+	if !ok {
+		return fmt.Errorf("enums must be strings")
+	}
+
+	*e = HashAlgorithm(str)
+	if !e.IsValid() {
+		return fmt.Errorf("%s is not a valid HashAlgorithm", str)
+	}
+	return nil
+}
+
+func (e HashAlgorithm) MarshalGQL(w io.Writer) {
+	fmt.Fprint(w, strconv.Quote(e.String()))
+}
 
 type File struct {
 	Checksum string `db:"checksum" json:"checksum"`
diff --git a/pkg/models/model_gallery.go b/pkg/models/model_gallery.go
index e7b2b09b4..4da6583f8 100644
--- a/pkg/models/model_gallery.go
+++ b/pkg/models/model_gallery.go
@@ -1,92 +1,147 @@
 package models
 
 import (
-	"database/sql"
-	"path/filepath"
+	"context"
+	"strconv"
 	"time"
+
+	"github.com/stashapp/stash/pkg/file"
 )
 
 type Gallery struct {
-	ID          int                 `db:"id" json:"id"`
-	Path        sql.NullString      `db:"path" json:"path"`
-	Checksum    string              `db:"checksum" json:"checksum"`
-	Zip         bool                `db:"zip" json:"zip"`
-	Title       sql.NullString      `db:"title" json:"title"`
-	URL         sql.NullString      `db:"url" json:"url"`
-	Date        SQLiteDate          `db:"date" json:"date"`
-	Details     sql.NullString      `db:"details" json:"details"`
-	Rating      sql.NullInt64       `db:"rating" json:"rating"`
-	Organized   bool                `db:"organized" json:"organized"`
-	StudioID    sql.NullInt64       `db:"studio_id,omitempty" json:"studio_id"`
-	FileModTime NullSQLiteTimestamp `db:"file_mod_time" json:"file_mod_time"`
-	CreatedAt   SQLiteTimestamp     `db:"created_at" json:"created_at"`
-	UpdatedAt   SQLiteTimestamp     `db:"updated_at" json:"updated_at"`
+	ID int `json:"id"`
+
+	Title     string `json:"title"`
+	URL       string `json:"url"`
+	Date      *Date  `json:"date"`
+	Details   string `json:"details"`
+	Rating    *int   `json:"rating"`
+	Organized bool   `json:"organized"`
+	StudioID  *int   `json:"studio_id"`
+
+	// transient - not persisted
+	Files RelatedFiles
+	// transient - not persisted
+	PrimaryFileID *file.ID
+	// transient - path of primary file or folder
+	Path string
+
+	FolderID *file.FolderID `json:"folder_id"`
+
+	CreatedAt time.Time `json:"created_at"`
+	UpdatedAt time.Time `json:"updated_at"`
+
+	SceneIDs     RelatedIDs `json:"scene_ids"`
+	TagIDs       RelatedIDs `json:"tag_ids"`
+	PerformerIDs RelatedIDs `json:"performer_ids"`
+}
+
+func (g *Gallery) LoadFiles(ctx context.Context, l FileLoader) error {
+	return g.Files.load(func() ([]file.File, error) {
+		return l.GetFiles(ctx, g.ID)
+	})
+}
+
+func (g *Gallery) LoadPrimaryFile(ctx context.Context, l file.Finder) error {
+	return g.Files.loadPrimary(func() (file.File, error) {
+		if g.PrimaryFileID == nil {
+			return nil, nil
+		}
+
+		f, err := l.Find(ctx, *g.PrimaryFileID)
+		if err != nil {
+			return nil, err
+		}
+
+		if len(f) > 0 {
+			return f[0], nil
+		}
+		return nil, nil
+	})
+}
+
+func (g *Gallery) LoadSceneIDs(ctx context.Context, l SceneIDLoader) error {
+	return g.SceneIDs.load(func() ([]int, error) {
+		return l.GetSceneIDs(ctx, g.ID)
+	})
+}
+
+func (g *Gallery) LoadPerformerIDs(ctx context.Context, l PerformerIDLoader) error {
+	return g.PerformerIDs.load(func() ([]int, error) {
+		return l.GetPerformerIDs(ctx, g.ID)
+	})
+}
+
+func (g *Gallery) LoadTagIDs(ctx context.Context, l TagIDLoader) error {
+	return g.TagIDs.load(func() ([]int, error) {
+		return l.GetTagIDs(ctx, g.ID)
+	})
+}
+
+func (g Gallery) Checksum() string {
+	if p := g.Files.Primary(); p != nil {
+		v := p.Base().Fingerprints.Get(file.FingerprintTypeMD5)
+		if v == nil {
+			return ""
+		}
+
+		return v.(string)
+	}
+	return ""
 }
 
 // GalleryPartial represents part of a Gallery object. It is used to update
 // the database entry. Only non-nil fields will be updated.
 type GalleryPartial struct {
-	ID          int                  `db:"id" json:"id"`
-	Path        *sql.NullString      `db:"path" json:"path"`
-	Checksum    *string              `db:"checksum" json:"checksum"`
-	Title       *sql.NullString      `db:"title" json:"title"`
-	URL         *sql.NullString      `db:"url" json:"url"`
-	Date        *SQLiteDate          `db:"date" json:"date"`
-	Details     *sql.NullString      `db:"details" json:"details"`
-	Rating      *sql.NullInt64       `db:"rating" json:"rating"`
-	Organized   *bool                `db:"organized" json:"organized"`
-	StudioID    *sql.NullInt64       `db:"studio_id,omitempty" json:"studio_id"`
-	FileModTime *NullSQLiteTimestamp `db:"file_mod_time" json:"file_mod_time"`
-	CreatedAt   *SQLiteTimestamp     `db:"created_at" json:"created_at"`
-	UpdatedAt   *SQLiteTimestamp     `db:"updated_at" json:"updated_at"`
+	// Path OptionalString
+	// Checksum OptionalString
+	// Zip OptionalBool
+	Title     OptionalString
+	URL       OptionalString
+	Date      OptionalDate
+	Details   OptionalString
+	Rating    OptionalInt
+	Organized OptionalBool
+	StudioID  OptionalInt
+	// FileModTime OptionalTime
+	CreatedAt OptionalTime
+	UpdatedAt OptionalTime
+
+	SceneIDs      *UpdateIDs
+	TagIDs        *UpdateIDs
+	PerformerIDs  *UpdateIDs
+	PrimaryFileID *file.ID
 }
 
-func (s *Gallery) File() File {
-	ret := File{
-		Path: s.Path.String,
-	}
-
-	ret.Checksum = s.Checksum
-
-	if s.FileModTime.Valid {
-		ret.FileModTime = s.FileModTime.Timestamp
-	}
-
-	return ret
-}
-
-func (s *Gallery) SetFile(f File) {
-	path := f.Path
-	s.Path = sql.NullString{
-		String: path,
-		Valid:  true,
-	}
-
-	if f.Checksum != "" {
-		s.Checksum = f.Checksum
-	}
-
-	zeroTime := time.Time{}
-	if f.FileModTime != zeroTime {
-		s.FileModTime = NullSQLiteTimestamp{
-			Timestamp: f.FileModTime,
-			Valid:     true,
-		}
+func NewGalleryPartial() GalleryPartial {
+	updatedTime := time.Now()
+	return GalleryPartial{
+		UpdatedAt: NewOptionalTime(updatedTime),
 	}
 }
 
 // GetTitle returns the title of the gallery. If the Title field is empty,
 // then the base filename is returned.
-func (s Gallery) GetTitle() string {
-	if s.Title.String != "" {
-		return s.Title.String
+func (g Gallery) GetTitle() string {
+	if g.Title != "" {
+		return g.Title
 	}
 
-	if s.Path.Valid {
-		return filepath.Base(s.Path.String)
+	return g.Path
+}
+
+// DisplayName returns a display name for the gallery for logging purposes.
+// It returns the path or title, or otherwise it returns the ID if both of these are empty.
+func (g Gallery) DisplayName() string {
+	if g.Path != "" {
+		return g.Path
 	}
 
-	return ""
+	if g.Title != "" {
+		return g.Title
+	}
+
+	return strconv.Itoa(g.ID)
 }
 
 const DefaultGthumbWidth int = 640
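model_gallery.go above introduces the lazy `Related*`/`Load*` pattern that the remaining model files repeat: relationship fields start out unloaded and are populated explicitly by a `Load*` call against a loader interface. A small sketch of the intended call sequence, using only names visible in this diff:

```go
package example

import (
	"context"

	"github.com/stashapp/stash/pkg/models"
)

// loadGalleryTags populates the transient TagIDs relationship on demand.
func loadGalleryTags(ctx context.Context, l models.TagIDLoader, g *models.Gallery) error {
	// Before this call g.TagIDs is an unloaded RelatedIDs value; afterwards
	// it holds whatever l.GetTagIDs(ctx, g.ID) returned.
	return g.LoadTagIDs(ctx, l)
}
```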
diff --git a/pkg/models/model_image.go b/pkg/models/model_image.go
index 4aae450ec..377e0cc5a 100644
--- a/pkg/models/model_image.go
+++ b/pkg/models/model_image.go
@@ -1,104 +1,137 @@
 package models
 
 import (
-	"database/sql"
+	"context"
+	"errors"
 	"path/filepath"
 	"strconv"
 	"time"
+
+	"github.com/stashapp/stash/pkg/file"
 )
 
 // Image stores the metadata for a single image.
 type Image struct {
-	ID          int                 `db:"id" json:"id"`
-	Checksum    string              `db:"checksum" json:"checksum"`
-	Path        string              `db:"path" json:"path"`
-	Title       sql.NullString      `db:"title" json:"title"`
-	Rating      sql.NullInt64       `db:"rating" json:"rating"`
-	Organized   bool                `db:"organized" json:"organized"`
-	OCounter    int                 `db:"o_counter" json:"o_counter"`
-	Size        sql.NullInt64       `db:"size" json:"size"`
-	Width       sql.NullInt64       `db:"width" json:"width"`
-	Height      sql.NullInt64       `db:"height" json:"height"`
-	StudioID    sql.NullInt64       `db:"studio_id,omitempty" json:"studio_id"`
-	FileModTime NullSQLiteTimestamp `db:"file_mod_time" json:"file_mod_time"`
-	CreatedAt   SQLiteTimestamp     `db:"created_at" json:"created_at"`
-	UpdatedAt   SQLiteTimestamp     `db:"updated_at" json:"updated_at"`
+	ID int `json:"id"`
+
+	Title     string `json:"title"`
+	Rating    *int   `json:"rating"`
+	Organized bool   `json:"organized"`
+	OCounter  int    `json:"o_counter"`
+	StudioID  *int   `json:"studio_id"`
+
+	// transient - not persisted
+	Files         RelatedImageFiles
+	PrimaryFileID *file.ID
+	// transient - path of primary file - empty if no files
+	Path string
+	// transient - checksum of primary file - empty if no files
+	Checksum string
+
+	CreatedAt time.Time `json:"created_at"`
+	UpdatedAt time.Time `json:"updated_at"`
+
+	GalleryIDs   RelatedIDs `json:"gallery_ids"`
+	TagIDs       RelatedIDs `json:"tag_ids"`
+	PerformerIDs RelatedIDs `json:"performer_ids"`
 }
 
-// ImagePartial represents part of a Image object. It is used to update
-// the database entry. Only non-nil fields will be updated.
-type ImagePartial struct {
-	ID          int                  `db:"id" json:"id"`
-	Checksum    *string              `db:"checksum" json:"checksum"`
-	Path        *string              `db:"path" json:"path"`
-	Title       *sql.NullString      `db:"title" json:"title"`
-	Rating      *sql.NullInt64       `db:"rating" json:"rating"`
-	Organized   *bool                `db:"organized" json:"organized"`
-	Size        *sql.NullInt64       `db:"size" json:"size"`
-	Width       *sql.NullInt64       `db:"width" json:"width"`
-	Height      *sql.NullInt64       `db:"height" json:"height"`
-	StudioID    *sql.NullInt64       `db:"studio_id,omitempty" json:"studio_id"`
-	FileModTime *NullSQLiteTimestamp `db:"file_mod_time" json:"file_mod_time"`
-	CreatedAt   *SQLiteTimestamp     `db:"created_at" json:"created_at"`
-	UpdatedAt   *SQLiteTimestamp     `db:"updated_at" json:"updated_at"`
+func (i *Image) LoadFiles(ctx context.Context, l ImageFileLoader) error {
+	return i.Files.load(func() ([]*file.ImageFile, error) {
+		return l.GetFiles(ctx, i.ID)
+	})
 }
 
-func (i *Image) File() File {
-	ret := File{
-		Path: i.Path,
-	}
-
-	ret.Checksum = i.Checksum
-	if i.FileModTime.Valid {
-		ret.FileModTime = i.FileModTime.Timestamp
-	}
-	if i.Size.Valid {
-		ret.Size = strconv.FormatInt(i.Size.Int64, 10)
-	}
-
-	return ret
-}
-
-func (i *Image) SetFile(f File) {
-	path := f.Path
-	i.Path = path
-
-	if f.Checksum != "" {
-		i.Checksum = f.Checksum
-	}
-	zeroTime := time.Time{}
-	if f.FileModTime != zeroTime {
-		i.FileModTime = NullSQLiteTimestamp{
-			Timestamp: f.FileModTime,
-			Valid:     true,
+func (i *Image) LoadPrimaryFile(ctx context.Context, l file.Finder) error {
+	return i.Files.loadPrimary(func() (*file.ImageFile, error) {
+		if i.PrimaryFileID == nil {
+			return nil, nil
 		}
-	}
-	if f.Size != "" {
-		size, err := strconv.ParseInt(f.Size, 10, 64)
-		if err == nil {
-			i.Size = sql.NullInt64{
-				Int64: size,
-				Valid: true,
+
+		f, err := l.Find(ctx, *i.PrimaryFileID)
+		if err != nil {
+			return nil, err
+		}
+
+		var vf *file.ImageFile
+		if len(f) > 0 {
+			var ok bool
+			vf, ok = f[0].(*file.ImageFile)
+			if !ok {
+				return nil, errors.New("not an image file")
 			}
 		}
-	}
+		return vf, nil
+	})
+}
+
+func (i *Image) LoadGalleryIDs(ctx context.Context, l GalleryIDLoader) error {
+	return i.GalleryIDs.load(func() ([]int, error) {
+		return l.GetGalleryIDs(ctx, i.ID)
+	})
+}
+
+func (i *Image) LoadPerformerIDs(ctx context.Context, l PerformerIDLoader) error {
+	return i.PerformerIDs.load(func() ([]int, error) {
+		return l.GetPerformerIDs(ctx, i.ID)
+	})
+}
+
+func (i *Image) LoadTagIDs(ctx context.Context, l TagIDLoader) error {
+	return i.TagIDs.load(func() ([]int, error) {
+		return l.GetTagIDs(ctx, i.ID)
	})
 }
 
 // GetTitle returns the title of the image. If the Title field is empty,
 // then the base filename is returned.
-func (i *Image) GetTitle() string {
-	if i.Title.String != "" {
-		return i.Title.String
+func (i Image) GetTitle() string {
+	if i.Title != "" {
+		return i.Title
 	}
 
-	return filepath.Base(i.Path)
+	if i.Path != "" {
+		return filepath.Base(i.Path)
+	}
+
+	return ""
 }
 
-// ImageFileType represents the file metadata for an image.
-type ImageFileType struct {
-	Size   *int `graphql:"size" json:"size"`
-	Width  *int `graphql:"width" json:"width"`
-	Height *int `graphql:"height" json:"height"`
+// DisplayName returns a display name for the image for logging purposes.
+// It returns Path if not empty, otherwise it returns the ID.
+func (i Image) DisplayName() string {
+	if i.Path != "" {
+		return i.Path
+	}
+
+	return strconv.Itoa(i.ID)
+}
+
+type ImageCreateInput struct {
+	*Image
+	FileIDs []file.ID
+}
+
+type ImagePartial struct {
+	Title     OptionalString
+	Rating    OptionalInt
+	Organized OptionalBool
+	OCounter  OptionalInt
+	StudioID  OptionalInt
+	CreatedAt OptionalTime
+	UpdatedAt OptionalTime
+
+	GalleryIDs    *UpdateIDs
+	TagIDs        *UpdateIDs
+	PerformerIDs  *UpdateIDs
+	PrimaryFileID *file.ID
+}
+
+func NewImagePartial() ImagePartial {
+	updatedTime := time.Now()
+	return ImagePartial{
+		UpdatedAt: NewOptionalTime(updatedTime),
	}
 }
 
 type Images []*Image
diff --git a/pkg/models/model_joins.go b/pkg/models/model_joins.go
index 1eebcd2f1..bcd47c9a9 100644
--- a/pkg/models/model_joins.go
+++ b/pkg/models/model_joins.go
@@ -1,21 +1,62 @@
 package models
 
-import "database/sql"
+import (
+	"fmt"
+	"strconv"
+)
 
 type MoviesScenes struct {
-	MovieID    int           `db:"movie_id" json:"movie_id"`
-	SceneID    int           `db:"scene_id" json:"scene_id"`
-	SceneIndex sql.NullInt64 `db:"scene_index" json:"scene_index"`
+	MovieID int `json:"movie_id"`
+	// SceneID int `json:"scene_id"`
+	SceneIndex *int `json:"scene_index"`
 }
 
-type StashID struct {
-	StashID  string `db:"stash_id" json:"stash_id"`
-	Endpoint string `db:"endpoint" json:"endpoint"`
-}
-
-func (s StashID) StashIDInput() StashIDInput {
-	return StashIDInput{
-		Endpoint: s.Endpoint,
-		StashID:  s.StashID,
+func (s MoviesScenes) SceneMovieInput() *SceneMovieInput {
+	return &SceneMovieInput{
+		MovieID:    strconv.Itoa(s.MovieID),
+		SceneIndex: s.SceneIndex,
 	}
 }
+
+func (s MoviesScenes) Equal(o MoviesScenes) bool {
+	return o.MovieID == s.MovieID && ((o.SceneIndex == nil && s.SceneIndex == nil) ||
+		(o.SceneIndex != nil && s.SceneIndex != nil && *o.SceneIndex == *s.SceneIndex))
+}
+
+type UpdateMovieIDs struct {
+	Movies []MoviesScenes         `json:"movies"`
+	Mode   RelationshipUpdateMode `json:"mode"`
+}
+
+func (u *UpdateMovieIDs) SceneMovieInputs() []*SceneMovieInput {
+	if u == nil {
+		return nil
+	}
+
+	// note: allocate with zero length and spare capacity — using
+	// make([]*SceneMovieInput, len(u.Movies)) here would prepend nils.
+	ret := make([]*SceneMovieInput, 0, len(u.Movies))
+	for _, id := range u.Movies {
+		ret = append(ret, id.SceneMovieInput())
+	}
+
+	return ret
+}
+
+func UpdateMovieIDsFromInput(i []*SceneMovieInput) (*UpdateMovieIDs, error) {
+	ret := &UpdateMovieIDs{
+		Mode: RelationshipUpdateModeSet,
+	}
+
+	for _, v := range i {
+		mID, err := strconv.Atoi(v.MovieID)
+		if err != nil {
+			return nil, fmt.Errorf("invalid movie ID: %s", v.MovieID)
+		}
+
+		ret.Movies = append(ret.Movies, MoviesScenes{
+			MovieID:    mID,
+			SceneIndex: v.SceneIndex,
+		})
+	}
+
+	return ret, nil
+}
diff --git a/pkg/models/model_saved_filter.go b/pkg/models/model_saved_filter.go
index 5acd6d8f8..618e9fe30 100644
--- a/pkg/models/model_saved_filter.go
+++ b/pkg/models/model_saved_filter.go
@@ -1,5 +1,64 @@
 package models
 
+import (
+	"fmt"
+	"io"
+	"strconv"
+)
+
+type FilterMode string
+
+const (
+	FilterModeScenes       FilterMode = "SCENES"
+	FilterModePerformers   FilterMode = "PERFORMERS"
+	FilterModeStudios      FilterMode = "STUDIOS"
+	FilterModeGalleries    FilterMode = "GALLERIES"
+	FilterModeSceneMarkers FilterMode = "SCENE_MARKERS"
+	FilterModeMovies       FilterMode = "MOVIES"
+	FilterModeTags         FilterMode = "TAGS"
+	FilterModeImages       FilterMode = "IMAGES"
+)
+
+var AllFilterMode = []FilterMode{
+	FilterModeScenes,
+	FilterModePerformers,
+	FilterModeStudios,
+	FilterModeGalleries,
+	FilterModeSceneMarkers,
+	FilterModeMovies,
+	FilterModeTags,
+	FilterModeImages,
+}
+
+func (e FilterMode) IsValid() bool {
+	switch e {
+	case FilterModeScenes, FilterModePerformers, FilterModeStudios, FilterModeGalleries, FilterModeSceneMarkers, FilterModeMovies, FilterModeTags, FilterModeImages:
+		return true
+	}
+	return false
+}
+
+func (e FilterMode) String() string {
+	return string(e)
+}
+
+func (e *FilterMode) UnmarshalGQL(v interface{}) error {
+	str, ok := v.(string)
+	if !ok {
+		return fmt.Errorf("enums must be strings")
+	}
+
+	*e = FilterMode(str)
+	if !e.IsValid() {
+		return fmt.Errorf("%s is not a valid FilterMode", str)
+	}
+	return nil
+}
+
+func (e FilterMode) MarshalGQL(w io.Writer) {
+	fmt.Fprint(w, strconv.Quote(e.String()))
+}
+
 type SavedFilter struct {
 	ID   int        `db:"id" json:"id"`
 	Mode FilterMode `db:"mode" json:"mode"`
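`FilterMode` (like `HashAlgorithm` earlier in this diff) is a hand-rolled gqlgen enum: a string type with `UnmarshalGQL`/`MarshalGQL` methods. A quick round trip through the two methods defined above:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/stashapp/stash/pkg/models"
)

func main() {
	var m models.FilterMode
	// UnmarshalGQL rejects non-strings and values outside AllFilterMode.
	if err := m.UnmarshalGQL("SCENES"); err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	m.MarshalGQL(&buf)
	fmt.Println(buf.String()) // "SCENES" — quoted, as gqlgen expects
}
```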
diff --git a/pkg/models/model_scene.go b/pkg/models/model_scene.go
index 7e1066ed3..f2dfbf28d 100644
--- a/pkg/models/model_scene.go
+++ b/pkg/models/model_scene.go
@@ -1,203 +1,249 @@
 package models
 
 import (
-	"database/sql"
+	"context"
+	"errors"
 	"path/filepath"
 	"strconv"
 	"time"
+
+	"github.com/stashapp/stash/pkg/file"
 )
 
 // Scene stores the metadata for a single video scene.
 type Scene struct {
-	ID               int                 `db:"id" json:"id"`
-	Checksum         sql.NullString      `db:"checksum" json:"checksum"`
-	OSHash           sql.NullString      `db:"oshash" json:"oshash"`
-	Path             string              `db:"path" json:"path"`
-	Title            sql.NullString      `db:"title" json:"title"`
-	Details          sql.NullString      `db:"details" json:"details"`
-	URL              sql.NullString      `db:"url" json:"url"`
-	Date             SQLiteDate          `db:"date" json:"date"`
-	Rating           sql.NullInt64       `db:"rating" json:"rating"`
-	Organized        bool                `db:"organized" json:"organized"`
-	OCounter         int                 `db:"o_counter" json:"o_counter"`
-	Size             sql.NullString      `db:"size" json:"size"`
-	Duration         sql.NullFloat64     `db:"duration" json:"duration"`
-	VideoCodec       sql.NullString      `db:"video_codec" json:"video_codec"`
-	Format           sql.NullString      `db:"format" json:"format_name"`
-	AudioCodec       sql.NullString      `db:"audio_codec" json:"audio_codec"`
-	Width            sql.NullInt64       `db:"width" json:"width"`
-	Height           sql.NullInt64       `db:"height" json:"height"`
-	Framerate        sql.NullFloat64     `db:"framerate" json:"framerate"`
-	Bitrate          sql.NullInt64       `db:"bitrate" json:"bitrate"`
-	StudioID         sql.NullInt64       `db:"studio_id,omitempty" json:"studio_id"`
-	FileModTime      NullSQLiteTimestamp `db:"file_mod_time" json:"file_mod_time"`
-	Phash            sql.NullInt64       `db:"phash,omitempty" json:"phash"`
-	CreatedAt        SQLiteTimestamp     `db:"created_at" json:"created_at"`
-	UpdatedAt        SQLiteTimestamp     `db:"updated_at" json:"updated_at"`
-	Interactive      bool                `db:"interactive" json:"interactive"`
-	InteractiveSpeed sql.NullInt64       `db:"interactive_speed" json:"interactive_speed"`
+	ID        int    `json:"id"`
+	Title     string `json:"title"`
+	Details   string `json:"details"`
+	URL       string `json:"url"`
+	Date      *Date  `json:"date"`
+	Rating    *int   `json:"rating"`
+	Organized bool   `json:"organized"`
+	OCounter  int    `json:"o_counter"`
+	StudioID  *int   `json:"studio_id"`
+
+	// transient - not persisted
+	Files         RelatedVideoFiles
+	PrimaryFileID *file.ID
+	// transient - path of primary file - empty if no files
+	Path string
+	// transient - oshash of primary file - empty if no files
+	OSHash string
+	// transient - checksum of primary file - empty if no files
+	Checksum string
+
+	CreatedAt time.Time `json:"created_at"`
+	UpdatedAt time.Time `json:"updated_at"`
+
+	GalleryIDs   RelatedIDs      `json:"gallery_ids"`
+	TagIDs       RelatedIDs      `json:"tag_ids"`
+	PerformerIDs RelatedIDs      `json:"performer_ids"`
+	Movies       RelatedMovies   `json:"movies"`
+	StashIDs     RelatedStashIDs `json:"stash_ids"`
 }
 
-func (s *Scene) File() File {
-	ret := File{
-		Path: s.Path,
-	}
-
-	if s.Checksum.Valid {
-		ret.Checksum = s.Checksum.String
-	}
-	if s.OSHash.Valid {
-		ret.OSHash = s.OSHash.String
-	}
-	if s.FileModTime.Valid {
-		ret.FileModTime = s.FileModTime.Timestamp
-	}
-	if s.Size.Valid {
-		ret.Size = s.Size.String
-	}
-
-	return ret
+func (s *Scene) LoadFiles(ctx context.Context, l VideoFileLoader) error {
+	return s.Files.load(func() ([]*file.VideoFile, error) {
+		return l.GetFiles(ctx, s.ID)
+	})
 }
 
-func (s *Scene) SetFile(f File) {
-	path := f.Path
-	s.Path = path
+func (s *Scene) LoadPrimaryFile(ctx context.Context, l file.Finder) error {
+	return s.Files.loadPrimary(func() (*file.VideoFile, error) {
+		if s.PrimaryFileID == nil {
+			return nil, nil
+		}
 
-	if f.Checksum != "" {
-		s.Checksum = sql.NullString{
-			String: f.Checksum,
-			Valid:  true,
+		f, err := l.Find(ctx, *s.PrimaryFileID)
+		if err != nil {
+			return nil, err
 		}
-	}
-	if f.OSHash != "" {
-		s.OSHash = sql.NullString{
-			String: f.OSHash,
-			Valid:  true,
+
+		var vf *file.VideoFile
+		if len(f) > 0 {
+			var ok bool
+			vf, ok = f[0].(*file.VideoFile)
+			if !ok {
+				return nil, errors.New("not a video file")
+			}
 		}
+		return vf, nil
+	})
+}
+
+func (s *Scene) LoadGalleryIDs(ctx context.Context, l GalleryIDLoader) error {
+	return s.GalleryIDs.load(func() ([]int, error) {
+		return l.GetGalleryIDs(ctx, s.ID)
+	})
+}
+
+func (s *Scene) LoadPerformerIDs(ctx context.Context, l PerformerIDLoader) error {
+	return s.PerformerIDs.load(func() ([]int, error) {
+		return l.GetPerformerIDs(ctx, s.ID)
+	})
+}
+
+func (s *Scene) LoadTagIDs(ctx context.Context, l TagIDLoader) error {
+	return s.TagIDs.load(func() ([]int, error) {
+		return l.GetTagIDs(ctx, s.ID)
+	})
+}
+
+func (s *Scene) LoadMovies(ctx context.Context, l SceneMovieLoader) error {
+	return s.Movies.load(func() ([]MoviesScenes, error) {
+		return l.GetMovies(ctx, s.ID)
+	})
+}
+
+func (s *Scene) LoadStashIDs(ctx context.Context, l StashIDLoader) error {
+	return s.StashIDs.load(func() ([]StashID, error) {
+		return l.GetStashIDs(ctx, s.ID)
+	})
+}
+
+func (s *Scene) LoadRelationships(ctx context.Context, l SceneReader) error {
+	if err := s.LoadGalleryIDs(ctx, l); err != nil {
+		return err
 	}
-	zeroTime := time.Time{}
-	if f.FileModTime != zeroTime {
-		s.FileModTime = NullSQLiteTimestamp{
-			Timestamp: f.FileModTime,
-			Valid:     true,
-		}
+
+	if err := s.LoadPerformerIDs(ctx, l); err != nil {
+		return err
 	}
-	if f.Size != "" {
-		s.Size = sql.NullString{
-			String: f.Size,
-			Valid:  true,
-		}
+
+	if err := s.LoadTagIDs(ctx, l); err != nil {
+		return err
 	}
+
+	if err := s.LoadMovies(ctx, l); err != nil {
+		return err
+	}
+
+	if err := s.LoadStashIDs(ctx, l); err != nil {
+		return err
+	}
+
+	if err := s.LoadFiles(ctx, l); err != nil {
+		return err
+	}
+
+	return nil
 }
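`LoadRelationships` bundles all of the per-relationship loaders into a single call, which is convenient before serializing a scene. A hedged sketch of the kind of call site it enables (the `exportScene` wrapper is hypothetical):

```go
package example

import (
	"context"

	"github.com/stashapp/stash/pkg/models"
)

// exportScene is a hypothetical call site: hydrate every transient
// relationship once, then the struct is safe to serialize.
func exportScene(ctx context.Context, r models.SceneReader, s *models.Scene) error {
	if err := s.LoadRelationships(ctx, r); err != nil {
		return err
	}
	// s.GalleryIDs, s.TagIDs, s.PerformerIDs, s.Movies, s.StashIDs and
	// s.Files are populated from this point on.
	return nil
}
```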
type ScenePartial struct { - ID int `db:"id" json:"id"` - Checksum *sql.NullString `db:"checksum" json:"checksum"` - OSHash *sql.NullString `db:"oshash" json:"oshash"` - Path *string `db:"path" json:"path"` - Title *sql.NullString `db:"title" json:"title"` - Details *sql.NullString `db:"details" json:"details"` - URL *sql.NullString `db:"url" json:"url"` - Date *SQLiteDate `db:"date" json:"date"` - Rating *sql.NullInt64 `db:"rating" json:"rating"` - Organized *bool `db:"organized" json:"organized"` - Size *sql.NullString `db:"size" json:"size"` - Duration *sql.NullFloat64 `db:"duration" json:"duration"` - VideoCodec *sql.NullString `db:"video_codec" json:"video_codec"` - Format *sql.NullString `db:"format" json:"format_name"` - AudioCodec *sql.NullString `db:"audio_codec" json:"audio_codec"` - Width *sql.NullInt64 `db:"width" json:"width"` - Height *sql.NullInt64 `db:"height" json:"height"` - Framerate *sql.NullFloat64 `db:"framerate" json:"framerate"` - Bitrate *sql.NullInt64 `db:"bitrate" json:"bitrate"` - StudioID *sql.NullInt64 `db:"studio_id,omitempty" json:"studio_id"` - MovieID *sql.NullInt64 `db:"movie_id,omitempty" json:"movie_id"` - FileModTime *NullSQLiteTimestamp `db:"file_mod_time" json:"file_mod_time"` - Phash *sql.NullInt64 `db:"phash,omitempty" json:"phash"` - CreatedAt *SQLiteTimestamp `db:"created_at" json:"created_at"` - UpdatedAt *SQLiteTimestamp `db:"updated_at" json:"updated_at"` - Interactive *bool `db:"interactive" json:"interactive"` - InteractiveSpeed *sql.NullInt64 `db:"interactive_speed" json:"interactive_speed"` + Title OptionalString + Details OptionalString + URL OptionalString + Date OptionalDate + Rating OptionalInt + Organized OptionalBool + OCounter OptionalInt + StudioID OptionalInt + CreatedAt OptionalTime + UpdatedAt OptionalTime + + GalleryIDs *UpdateIDs + TagIDs *UpdateIDs + PerformerIDs *UpdateIDs + MovieIDs *UpdateMovieIDs + StashIDs *UpdateStashIDs + PrimaryFileID *file.ID +} + +func NewScenePartial() ScenePartial { + updatedTime := time.Now() + return ScenePartial{ + UpdatedAt: NewOptionalTime(updatedTime), + } +} + +type SceneMovieInput struct { + MovieID string `json:"movie_id"` + SceneIndex *int `json:"scene_index"` +} + +type SceneUpdateInput struct { + ClientMutationID *string `json:"clientMutationId"` + ID string `json:"id"` + Title *string `json:"title"` + Details *string `json:"details"` + URL *string `json:"url"` + Date *string `json:"date"` + Rating *int `json:"rating"` + Organized *bool `json:"organized"` + StudioID *string `json:"studio_id"` + GalleryIds []string `json:"gallery_ids"` + PerformerIds []string `json:"performer_ids"` + Movies []*SceneMovieInput `json:"movies"` + TagIds []string `json:"tag_ids"` + // This should be a URL or a base64 encoded data URL + CoverImage *string `json:"cover_image"` + StashIds []StashID `json:"stash_ids"` } // UpdateInput constructs a SceneUpdateInput using the populated fields in the ScenePartial object. 
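ScenePartial now signals field presence with Optional* wrappers rather than pointers to sql.Null* values. A minimal sketch of a partial update, assuming a hypothetical SceneReaderWriter value repo and a scene ID sceneID; the NewOptional* constructors are the ones exercised by the test changes below:

p := NewScenePartial() // pre-populates UpdatedAt with the current time
p.Title = NewOptionalString("Renamed scene")
p.Rating = NewOptionalInt(4)

// Presumably only fields whose Set flag is true are written back.
updated, err := repo.UpdatePartial(ctx, sceneID, p)
if err != nil {
	return err
}
_ = updated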
-func (s ScenePartial) UpdateInput() SceneUpdateInput { - boolPtrCopy := func(v *bool) *bool { - if v == nil { - return nil - } +func (s ScenePartial) UpdateInput(id int) SceneUpdateInput { + var dateStr *string + if s.Date.Set { + d := s.Date.Value + v := d.String() + dateStr = &v + } - vv := *v - return &vv + var stashIDs []StashID + if s.StashIDs != nil { + stashIDs = s.StashIDs.StashIDs } return SceneUpdateInput{ - ID: strconv.Itoa(s.ID), - Title: nullStringPtrToStringPtr(s.Title), - Details: nullStringPtrToStringPtr(s.Details), - URL: nullStringPtrToStringPtr(s.URL), - Date: s.Date.StringPtr(), - Rating: nullInt64PtrToIntPtr(s.Rating), - Organized: boolPtrCopy(s.Organized), - StudioID: nullInt64PtrToStringPtr(s.StudioID), - } -} - -func (s *ScenePartial) SetFile(f File) { - path := f.Path - s.Path = &path - - if f.Checksum != "" { - s.Checksum = &sql.NullString{ - String: f.Checksum, - Valid: true, - } - } - if f.OSHash != "" { - s.OSHash = &sql.NullString{ - String: f.OSHash, - Valid: true, - } - } - zeroTime := time.Time{} - if f.FileModTime != zeroTime { - s.FileModTime = &NullSQLiteTimestamp{ - Timestamp: f.FileModTime, - Valid: true, - } - } - if f.Size != "" { - s.Size = &sql.NullString{ - String: f.Size, - Valid: true, - } + ID: strconv.Itoa(id), + Title: s.Title.Ptr(), + Details: s.Details.Ptr(), + URL: s.URL.Ptr(), + Date: dateStr, + Rating: s.Rating.Ptr(), + Organized: s.Organized.Ptr(), + StudioID: s.StudioID.StringPtr(), + GalleryIds: s.GalleryIDs.IDStrings(), + PerformerIds: s.PerformerIDs.IDStrings(), + Movies: s.MovieIDs.SceneMovieInputs(), + TagIds: s.TagIDs.IDStrings(), + StashIds: stashIDs, } } // GetTitle returns the title of the scene. If the Title field is empty, // then the base filename is returned. func (s Scene) GetTitle() string { - if s.Title.String != "" { - return s.Title.String + if s.Title != "" { + return s.Title } return filepath.Base(s.Path) } +// DisplayName returns a display name for the scene for logging purposes. +// It returns Path if not empty, otherwise it returns the ID. +func (s Scene) DisplayName() string { + if s.Path != "" { + return s.Path + } + + return strconv.Itoa(s.ID) +} + // GetHash returns the hash of the scene, based on the hash algorithm provided. If // hash algorithm is MD5, then Checksum is returned. Otherwise, OSHash is returned. func (s Scene) GetHash(hashAlgorithm HashAlgorithm) string { - return s.File().GetHash(hashAlgorithm) -} - -func (s Scene) GetMinResolution() int64 { - if s.Width.Int64 < s.Height.Int64 { - return s.Width.Int64 + switch hashAlgorithm { + case HashAlgorithmMd5: + return s.Checksum + case HashAlgorithmOshash: + return s.OSHash } - return s.Height.Int64 + return "" } // SceneFileType represents the file metadata for a scene. 
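Because ScenePartial no longer carries its own ID, UpdateInput takes the scene ID as a parameter, and GetHash now reads the transient Checksum/OSHash fields directly instead of going through a File value. A short sketch continuing the hypothetical p and scene from above:

_ = p.UpdateInput(sceneID) // ID is a parameter now, not a struct field

// Each returns "" when the corresponding hash was never populated.
_ = scene.GetHash(HashAlgorithmMd5)
_ = scene.GetHash(HashAlgorithmOshash)

// DisplayName prefers Path and falls back to the ID, for log messages.
_ = scene.DisplayName()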
@@ -222,12 +268,12 @@ func (s *Scenes) New() interface{} { return &Scene{} } -type SceneCaption struct { +type VideoCaption struct { LanguageCode string `json:"language_code"` Filename string `json:"filename"` CaptionType string `json:"caption_type"` } -func (c SceneCaption) Path(scenePath string) string { - return filepath.Join(filepath.Dir(scenePath), c.Filename) +func (c VideoCaption) Path(filePath string) string { + return filepath.Join(filepath.Dir(filePath), c.Filename) } diff --git a/pkg/models/model_scene_test.go b/pkg/models/model_scene_test.go index 43216e539..e4f1e37ac 100644 --- a/pkg/models/model_scene_test.go +++ b/pkg/models/model_scene_test.go @@ -1,7 +1,6 @@ package models import ( - "database/sql" "reflect" "testing" ) @@ -23,31 +22,25 @@ func TestScenePartial_UpdateInput(t *testing.T) { studioIDStr = "2" ) + dateObj := NewDate(date) + tests := []struct { name string + id int s ScenePartial want SceneUpdateInput }{ { "full", + id, ScenePartial{ - ID: id, - Title: NullStringPtr(title), - Details: NullStringPtr(details), - URL: NullStringPtr(url), - Date: &SQLiteDate{ - String: date, - Valid: true, - }, - Rating: &sql.NullInt64{ - Int64: int64(rating), - Valid: true, - }, - Organized: &organized, - StudioID: &sql.NullInt64{ - Int64: int64(studioID), - Valid: true, - }, + Title: NewOptionalString(title), + Details: NewOptionalString(details), + URL: NewOptionalString(url), + Date: NewOptionalDate(dateObj), + Rating: NewOptionalInt(rating), + Organized: NewOptionalBool(organized), + StudioID: NewOptionalInt(studioID), }, SceneUpdateInput{ ID: idStr, @@ -62,9 +55,8 @@ func TestScenePartial_UpdateInput(t *testing.T) { }, { "empty", - ScenePartial{ - ID: id, - }, + id, + ScenePartial{}, SceneUpdateInput{ ID: idStr, }, @@ -72,7 +64,7 @@ func TestScenePartial_UpdateInput(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := tt.s.UpdateInput(); !reflect.DeepEqual(got, tt.want) { + if got := tt.s.UpdateInput(tt.id); !reflect.DeepEqual(got, tt.want) { t.Errorf("ScenePartial.UpdateInput() = %v, want %v", got, tt.want) } }) diff --git a/pkg/models/model_scraped_item.go b/pkg/models/model_scraped_item.go index 4035163b7..9563fbfd2 100644 --- a/pkg/models/model_scraped_item.go +++ b/pkg/models/model_scraped_item.go @@ -4,6 +4,78 @@ import ( "database/sql" ) +type ScrapedStudio struct { + // Set if studio matched + StoredID *string `json:"stored_id"` + Name string `json:"name"` + URL *string `json:"url"` + Image *string `json:"image"` + RemoteSiteID *string `json:"remote_site_id"` +} + +func (ScrapedStudio) IsScrapedContent() {} + +// A performer from a scraping operation... 
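Each scraped type implements an IsScrapedContent marker method, the pattern gqlgen uses for union membership; the ScrapedContent interface itself is assumed to be generated elsewhere and does not appear in this diff. A sketch under that assumption:

// Hypothetical: relies on a generated ScrapedContent interface.
var content ScrapedContent = ScrapedStudio{Name: "Example Studio"}
_ = content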
+type ScrapedPerformer struct { + // Set if performer matched + StoredID *string `json:"stored_id"` + Name *string `json:"name"` + Gender *string `json:"gender"` + URL *string `json:"url"` + Twitter *string `json:"twitter"` + Instagram *string `json:"instagram"` + Birthdate *string `json:"birthdate"` + Ethnicity *string `json:"ethnicity"` + Country *string `json:"country"` + EyeColor *string `json:"eye_color"` + Height *string `json:"height"` + Measurements *string `json:"measurements"` + FakeTits *string `json:"fake_tits"` + CareerLength *string `json:"career_length"` + Tattoos *string `json:"tattoos"` + Piercings *string `json:"piercings"` + Aliases *string `json:"aliases"` + Tags []*ScrapedTag `json:"tags"` + // This should be a base64 encoded data URL + Image *string `json:"image"` + Images []string `json:"images"` + Details *string `json:"details"` + DeathDate *string `json:"death_date"` + HairColor *string `json:"hair_color"` + Weight *string `json:"weight"` + RemoteSiteID *string `json:"remote_site_id"` +} + +func (ScrapedPerformer) IsScrapedContent() {} + +type ScrapedTag struct { + // Set if tag matched + StoredID *string `json:"stored_id"` + Name string `json:"name"` +} + +func (ScrapedTag) IsScrapedContent() {} + +// A movie from a scraping operation... +type ScrapedMovie struct { + StoredID *string `json:"stored_id"` + Name *string `json:"name"` + Aliases *string `json:"aliases"` + Duration *string `json:"duration"` + Date *string `json:"date"` + Rating *string `json:"rating"` + Director *string `json:"director"` + URL *string `json:"url"` + Synopsis *string `json:"synopsis"` + Studio *ScrapedStudio `json:"studio"` + // This should be a base64 encoded data URL + FrontImage *string `json:"front_image"` + // This should be a base64 encoded data URL + BackImage *string `json:"back_image"` +} + +func (ScrapedMovie) IsScrapedContent() {} + type ScrapedItem struct { ID int `db:"id" json:"id"` Title sql.NullString `db:"title" json:"title"` diff --git a/pkg/models/movie.go b/pkg/models/movie.go index 3d11e1e51..3fc1890a6 100644 --- a/pkg/models/movie.go +++ b/pkg/models/movie.go @@ -1,29 +1,49 @@ package models +import "context" + +type MovieFilterType struct { + Name *StringCriterionInput `json:"name"` + Director *StringCriterionInput `json:"director"` + Synopsis *StringCriterionInput `json:"synopsis"` + // Filter by duration (in seconds) + Duration *IntCriterionInput `json:"duration"` + // Filter by rating + Rating *IntCriterionInput `json:"rating"` + // Filter to only include movies with this studio + Studios *HierarchicalMultiCriterionInput `json:"studios"` + // Filter to only include movies missing this property + IsMissing *string `json:"is_missing"` + // Filter by url + URL *StringCriterionInput `json:"url"` + // Filter to only include movies where performer appears in a scene + Performers *MultiCriterionInput `json:"performers"` +} + type MovieReader interface { - Find(id int) (*Movie, error) - FindMany(ids []int) ([]*Movie, error) + Find(ctx context.Context, id int) (*Movie, error) + FindMany(ctx context.Context, ids []int) ([]*Movie, error) // FindBySceneID(sceneID int) ([]*Movie, error) - FindByName(name string, nocase bool) (*Movie, error) - FindByNames(names []string, nocase bool) ([]*Movie, error) - All() ([]*Movie, error) - Count() (int, error) - Query(movieFilter *MovieFilterType, findFilter *FindFilterType) ([]*Movie, int, error) - GetFrontImage(movieID int) ([]byte, error) - GetBackImage(movieID int) ([]byte, error) - FindByPerformerID(performerID int) ([]*Movie, 
error) - CountByPerformerID(performerID int) (int, error) - FindByStudioID(studioID int) ([]*Movie, error) - CountByStudioID(studioID int) (int, error) + FindByName(ctx context.Context, name string, nocase bool) (*Movie, error) + FindByNames(ctx context.Context, names []string, nocase bool) ([]*Movie, error) + All(ctx context.Context) ([]*Movie, error) + Count(ctx context.Context) (int, error) + Query(ctx context.Context, movieFilter *MovieFilterType, findFilter *FindFilterType) ([]*Movie, int, error) + GetFrontImage(ctx context.Context, movieID int) ([]byte, error) + GetBackImage(ctx context.Context, movieID int) ([]byte, error) + FindByPerformerID(ctx context.Context, performerID int) ([]*Movie, error) + CountByPerformerID(ctx context.Context, performerID int) (int, error) + FindByStudioID(ctx context.Context, studioID int) ([]*Movie, error) + CountByStudioID(ctx context.Context, studioID int) (int, error) } type MovieWriter interface { - Create(newMovie Movie) (*Movie, error) - Update(updatedMovie MoviePartial) (*Movie, error) - UpdateFull(updatedMovie Movie) (*Movie, error) - Destroy(id int) error - UpdateImages(movieID int, frontImage []byte, backImage []byte) error - DestroyImages(movieID int) error + Create(ctx context.Context, newMovie Movie) (*Movie, error) + Update(ctx context.Context, updatedMovie MoviePartial) (*Movie, error) + UpdateFull(ctx context.Context, updatedMovie Movie) (*Movie, error) + Destroy(ctx context.Context, id int) error + UpdateImages(ctx context.Context, movieID int, frontImage []byte, backImage []byte) error + DestroyImages(ctx context.Context, movieID int) error } type MovieReaderWriter interface { diff --git a/pkg/models/paths/paths_json.go b/pkg/models/paths/paths_json.go index 486ffe71d..7f05027c4 100644 --- a/pkg/models/paths/paths_json.go +++ b/pkg/models/paths/paths_json.go @@ -10,8 +10,7 @@ import ( type JSONPaths struct { Metadata string - MappingsFile string - ScrapedFile string + ScrapedFile string Performers string Scenes string @@ -20,12 +19,12 @@ type JSONPaths struct { Studios string Tags string Movies string + Files string } func newJSONPaths(baseDir string) *JSONPaths { jp := JSONPaths{} jp.Metadata = baseDir - jp.MappingsFile = filepath.Join(baseDir, "mappings.json") jp.ScrapedFile = filepath.Join(baseDir, "scraped.json") jp.Performers = filepath.Join(baseDir, "performers") jp.Scenes = filepath.Join(baseDir, "scenes") @@ -34,6 +33,7 @@ func newJSONPaths(baseDir string) *JSONPaths { jp.Studios = filepath.Join(baseDir, "studios") jp.Movies = filepath.Join(baseDir, "movies") jp.Tags = filepath.Join(baseDir, "tags") + jp.Files = filepath.Join(baseDir, "files") return &jp } @@ -42,6 +42,18 @@ func GetJSONPaths(baseDir string) *JSONPaths { return jp } +func EmptyJSONDirs(baseDir string) { + jsonPaths := GetJSONPaths(baseDir) + _ = fsutil.EmptyDir(jsonPaths.Scenes) + _ = fsutil.EmptyDir(jsonPaths.Images) + _ = fsutil.EmptyDir(jsonPaths.Galleries) + _ = fsutil.EmptyDir(jsonPaths.Performers) + _ = fsutil.EmptyDir(jsonPaths.Studios) + _ = fsutil.EmptyDir(jsonPaths.Movies) + _ = fsutil.EmptyDir(jsonPaths.Tags) + _ = fsutil.EmptyDir(jsonPaths.Files) +} + func EnsureJSONDirs(baseDir string) { jsonPaths := GetJSONPaths(baseDir) if err := fsutil.EnsureDir(jsonPaths.Metadata); err != nil { @@ -68,32 +80,7 @@ func EnsureJSONDirs(baseDir string) { if err := fsutil.EnsureDir(jsonPaths.Tags); err != nil { logger.Warnf("couldn't create directories for Tags: %v", err) } -} - -func (jp *JSONPaths) PerformerJSONPath(checksum string) string { - return 
filepath.Join(jp.Performers, checksum+".json") -} - -func (jp *JSONPaths) SceneJSONPath(checksum string) string { - return filepath.Join(jp.Scenes, checksum+".json") -} - -func (jp *JSONPaths) ImageJSONPath(checksum string) string { - return filepath.Join(jp.Images, checksum+".json") -} - -func (jp *JSONPaths) GalleryJSONPath(checksum string) string { - return filepath.Join(jp.Galleries, checksum+".json") -} - -func (jp *JSONPaths) StudioJSONPath(checksum string) string { - return filepath.Join(jp.Studios, checksum+".json") -} - -func (jp *JSONPaths) TagJSONPath(checksum string) string { - return filepath.Join(jp.Tags, checksum+".json") -} - -func (jp *JSONPaths) MovieJSONPath(checksum string) string { - return filepath.Join(jp.Movies, checksum+".json") + if err := fsutil.EnsureDir(jsonPaths.Files); err != nil { + logger.Warnf("couldn't create directories for Files: %v", err) + } } diff --git a/pkg/models/performer.go b/pkg/models/performer.go index 04173b47e..a00eea7fc 100644 --- a/pkg/models/performer.go +++ b/pkg/models/performer.go @@ -1,36 +1,165 @@ package models +import ( + "context" + "fmt" + "io" + "strconv" +) + +type GenderEnum string + +const ( + GenderEnumMale GenderEnum = "MALE" + GenderEnumFemale GenderEnum = "FEMALE" + GenderEnumTransgenderMale GenderEnum = "TRANSGENDER_MALE" + GenderEnumTransgenderFemale GenderEnum = "TRANSGENDER_FEMALE" + GenderEnumIntersex GenderEnum = "INTERSEX" + GenderEnumNonBinary GenderEnum = "NON_BINARY" +) + +var AllGenderEnum = []GenderEnum{ + GenderEnumMale, + GenderEnumFemale, + GenderEnumTransgenderMale, + GenderEnumTransgenderFemale, + GenderEnumIntersex, + GenderEnumNonBinary, +} + +func (e GenderEnum) IsValid() bool { + switch e { + case GenderEnumMale, GenderEnumFemale, GenderEnumTransgenderMale, GenderEnumTransgenderFemale, GenderEnumIntersex, GenderEnumNonBinary: + return true + } + return false +} + +func (e GenderEnum) String() string { + return string(e) +} + +func (e *GenderEnum) UnmarshalGQL(v interface{}) error { + str, ok := v.(string) + if !ok { + return fmt.Errorf("enums must be strings") + } + + *e = GenderEnum(str) + if !e.IsValid() { + return fmt.Errorf("%s is not a valid GenderEnum", str) + } + return nil +} + +func (e GenderEnum) MarshalGQL(w io.Writer) { + fmt.Fprint(w, strconv.Quote(e.String())) +} + +type GenderCriterionInput struct { + Value *GenderEnum `json:"value"` + Modifier CriterionModifier `json:"modifier"` +} + +type PerformerFilterType struct { + And *PerformerFilterType `json:"AND"` + Or *PerformerFilterType `json:"OR"` + Not *PerformerFilterType `json:"NOT"` + Name *StringCriterionInput `json:"name"` + Details *StringCriterionInput `json:"details"` + // Filter by favorite + FilterFavorites *bool `json:"filter_favorites"` + // Filter by birth year + BirthYear *IntCriterionInput `json:"birth_year"` + // Filter by age + Age *IntCriterionInput `json:"age"` + // Filter by ethnicity + Ethnicity *StringCriterionInput `json:"ethnicity"` + // Filter by country + Country *StringCriterionInput `json:"country"` + // Filter by eye color + EyeColor *StringCriterionInput `json:"eye_color"` + // Filter by height + Height *StringCriterionInput `json:"height"` + // Filter by measurements + Measurements *StringCriterionInput `json:"measurements"` + // Filter by fake tits value + FakeTits *StringCriterionInput `json:"fake_tits"` + // Filter by career length + CareerLength *StringCriterionInput `json:"career_length"` + // Filter by tattoos + Tattoos *StringCriterionInput `json:"tattoos"` + // Filter by piercings + Piercings 
*StringCriterionInput `json:"piercings"` + // Filter by aliases + Aliases *StringCriterionInput `json:"aliases"` + // Filter by gender + Gender *GenderCriterionInput `json:"gender"` + // Filter to only include performers missing this property + IsMissing *string `json:"is_missing"` + // Filter to only include performers with these tags + Tags *HierarchicalMultiCriterionInput `json:"tags"` + // Filter by tag count + TagCount *IntCriterionInput `json:"tag_count"` + // Filter by scene count + SceneCount *IntCriterionInput `json:"scene_count"` + // Filter by image count + ImageCount *IntCriterionInput `json:"image_count"` + // Filter by gallery count + GalleryCount *IntCriterionInput `json:"gallery_count"` + // Filter by StashID + StashID *StringCriterionInput `json:"stash_id"` + // Filter by rating + Rating *IntCriterionInput `json:"rating"` + // Filter by url + URL *StringCriterionInput `json:"url"` + // Filter by hair color + HairColor *StringCriterionInput `json:"hair_color"` + // Filter by weight + Weight *IntCriterionInput `json:"weight"` + // Filter by death year + DeathYear *IntCriterionInput `json:"death_year"` + // Filter by studios where performer appears in scene/image/gallery + Studios *HierarchicalMultiCriterionInput `json:"studios"` + // Filter by autotag ignore value + IgnoreAutoTag *bool `json:"ignore_auto_tag"` +} + +type PerformerFinder interface { + FindMany(ctx context.Context, ids []int) ([]*Performer, error) +} + type PerformerReader interface { - Find(id int) (*Performer, error) - FindMany(ids []int) ([]*Performer, error) - FindBySceneID(sceneID int) ([]*Performer, error) - FindNamesBySceneID(sceneID int) ([]*Performer, error) - FindByImageID(imageID int) ([]*Performer, error) - FindByGalleryID(galleryID int) ([]*Performer, error) - FindByNames(names []string, nocase bool) ([]*Performer, error) - FindByStashID(stashID StashID) ([]*Performer, error) - FindByStashIDStatus(hasStashID bool, stashboxEndpoint string) ([]*Performer, error) - CountByTagID(tagID int) (int, error) - Count() (int, error) - All() ([]*Performer, error) + Find(ctx context.Context, id int) (*Performer, error) + PerformerFinder + FindBySceneID(ctx context.Context, sceneID int) ([]*Performer, error) + FindNamesBySceneID(ctx context.Context, sceneID int) ([]*Performer, error) + FindByImageID(ctx context.Context, imageID int) ([]*Performer, error) + FindByGalleryID(ctx context.Context, galleryID int) ([]*Performer, error) + FindByNames(ctx context.Context, names []string, nocase bool) ([]*Performer, error) + FindByStashID(ctx context.Context, stashID StashID) ([]*Performer, error) + FindByStashIDStatus(ctx context.Context, hasStashID bool, stashboxEndpoint string) ([]*Performer, error) + CountByTagID(ctx context.Context, tagID int) (int, error) + Count(ctx context.Context) (int, error) + All(ctx context.Context) ([]*Performer, error) // TODO - this interface is temporary until the filter schema can fully // support the query needed - QueryForAutoTag(words []string) ([]*Performer, error) - Query(performerFilter *PerformerFilterType, findFilter *FindFilterType) ([]*Performer, int, error) - GetImage(performerID int) ([]byte, error) - GetStashIDs(performerID int) ([]*StashID, error) - GetTagIDs(performerID int) ([]int, error) + QueryForAutoTag(ctx context.Context, words []string) ([]*Performer, error) + Query(ctx context.Context, performerFilter *PerformerFilterType, findFilter *FindFilterType) ([]*Performer, int, error) + GetImage(ctx context.Context, performerID int) ([]byte, error) + StashIDLoader + 
GetTagIDs(ctx context.Context, performerID int) ([]int, error) } type PerformerWriter interface { - Create(newPerformer Performer) (*Performer, error) - Update(updatedPerformer PerformerPartial) (*Performer, error) - UpdateFull(updatedPerformer Performer) (*Performer, error) - Destroy(id int) error - UpdateImage(performerID int, image []byte) error - DestroyImage(performerID int) error - UpdateStashIDs(performerID int, stashIDs []StashID) error - UpdateTags(performerID int, tagIDs []int) error + Create(ctx context.Context, newPerformer Performer) (*Performer, error) + Update(ctx context.Context, updatedPerformer PerformerPartial) (*Performer, error) + UpdateFull(ctx context.Context, updatedPerformer Performer) (*Performer, error) + Destroy(ctx context.Context, id int) error + UpdateImage(ctx context.Context, performerID int, image []byte) error + DestroyImage(ctx context.Context, performerID int) error + UpdateStashIDs(ctx context.Context, performerID int, stashIDs []StashID) error + UpdateTags(ctx context.Context, performerID int, tagIDs []int) error } type PerformerReaderWriter interface { diff --git a/pkg/models/relationships.go b/pkg/models/relationships.go new file mode 100644 index 000000000..41bd0a69c --- /dev/null +++ b/pkg/models/relationships.go @@ -0,0 +1,470 @@ +package models + +import ( + "context" + + "github.com/stashapp/stash/pkg/file" +) + +type SceneIDLoader interface { + GetSceneIDs(ctx context.Context, relatedID int) ([]int, error) +} + +type GalleryIDLoader interface { + GetGalleryIDs(ctx context.Context, relatedID int) ([]int, error) +} + +type PerformerIDLoader interface { + GetPerformerIDs(ctx context.Context, relatedID int) ([]int, error) +} + +type TagIDLoader interface { + GetTagIDs(ctx context.Context, relatedID int) ([]int, error) +} + +type SceneMovieLoader interface { + GetMovies(ctx context.Context, id int) ([]MoviesScenes, error) +} + +type StashIDLoader interface { + GetStashIDs(ctx context.Context, relatedID int) ([]StashID, error) +} + +type VideoFileLoader interface { + GetFiles(ctx context.Context, relatedID int) ([]*file.VideoFile, error) +} + +type ImageFileLoader interface { + GetFiles(ctx context.Context, relatedID int) ([]*file.ImageFile, error) +} + +type FileLoader interface { + GetFiles(ctx context.Context, relatedID int) ([]file.File, error) +} + +// RelatedIDs represents a list of related IDs. +// TODO - this can be made generic +type RelatedIDs struct { + list []int +} + +// NewRelatedIDs returns a loaded RelatedIDs object with the provided IDs. +// Loaded will return true when called on the returned object if the provided slice is not nil. +func NewRelatedIDs(ids []int) RelatedIDs { + return RelatedIDs{ + list: ids, + } +} + +// Loaded returns true if the related IDs have been loaded. +func (r RelatedIDs) Loaded() bool { + return r.list != nil +} + +func (r RelatedIDs) mustLoaded() { + if !r.Loaded() { + panic("list has not been loaded") + } +} + +// List returns the related IDs. Panics if the relationship has not been loaded. +func (r RelatedIDs) List() []int { + r.mustLoaded() + + return r.list +} + +// Add adds the provided ids to the list. Panics if the relationship has not been loaded. +func (r *RelatedIDs) Add(ids ...int) { + r.mustLoaded() + + r.list = append(r.list, ids...) 
+} + +func (r *RelatedIDs) load(fn func() ([]int, error)) error { + if r.Loaded() { + return nil + } + + ids, err := fn() + if err != nil { + return err + } + + if ids == nil { + ids = []int{} + } + + r.list = ids + + return nil +} + +// RelatedMovies represents a list of related Movies. +type RelatedMovies struct { + list []MoviesScenes +} + +// NewRelatedMovies returns a loaded RelatedMovies object with the provided movies. +// Loaded will return true when called on the returned object if the provided slice is not nil. +func NewRelatedMovies(list []MoviesScenes) RelatedMovies { + return RelatedMovies{ + list: list, + } +} + +// Loaded returns true if the relationship has been loaded. +func (r RelatedMovies) Loaded() bool { + return r.list != nil +} + +func (r RelatedMovies) mustLoaded() { + if !r.Loaded() { + panic("list has not been loaded") + } +} + +// List returns the related Movies. Panics if the relationship has not been loaded. +func (r RelatedMovies) List() []MoviesScenes { + r.mustLoaded() + + return r.list +} + +// Add adds the provided movies to the list. Panics if the relationship has not been loaded. +func (r *RelatedMovies) Add(movies ...MoviesScenes) { + r.mustLoaded() + + r.list = append(r.list, movies...) +} + +func (r *RelatedMovies) load(fn func() ([]MoviesScenes, error)) error { + if r.Loaded() { + return nil + } + + ids, err := fn() + if err != nil { + return err + } + + if ids == nil { + ids = []MoviesScenes{} + } + + r.list = ids + + return nil +} + +type RelatedStashIDs struct { + list []StashID +} + +// NewRelatedStashIDs returns a RelatedStashIDs object with the provided ids. +// Loaded will return true when called on the returned object if the provided slice is not nil. +func NewRelatedStashIDs(list []StashID) RelatedStashIDs { + return RelatedStashIDs{ + list: list, + } +} + +func (r RelatedStashIDs) mustLoaded() { + if !r.Loaded() { + panic("list has not been loaded") + } +} + +// Loaded returns true if the relationship has been loaded. +func (r RelatedStashIDs) Loaded() bool { + return r.list != nil +} + +// List returns the related Stash IDs. Panics if the relationship has not been loaded. +func (r RelatedStashIDs) List() []StashID { + r.mustLoaded() + + return r.list +} + +func (r *RelatedStashIDs) load(fn func() ([]StashID, error)) error { + if r.Loaded() { + return nil + } + + ids, err := fn() + if err != nil { + return err + } + + if ids == nil { + ids = []StashID{} + } + + r.list = ids + + return nil +} + +type RelatedVideoFiles struct { + primaryFile *file.VideoFile + files []*file.VideoFile + primaryLoaded bool +} + +func NewRelatedVideoFiles(files []*file.VideoFile) RelatedVideoFiles { + ret := RelatedVideoFiles{ + files: files, + primaryLoaded: true, + } + + if len(files) > 0 { + ret.primaryFile = files[0] + } + + return ret +} + +func (r *RelatedVideoFiles) SetPrimary(f *file.VideoFile) { + r.primaryFile = f + r.primaryLoaded = true +} + +func (r *RelatedVideoFiles) Set(f []*file.VideoFile) { + r.files = f + if len(r.files) > 0 { + r.primaryFile = r.files[0] + } + + r.primaryLoaded = true +} + +// Loaded returns true if the relationship has been loaded. +func (r RelatedVideoFiles) Loaded() bool { + return r.files != nil +} + +// PrimaryLoaded returns true if the primary file relationship has been loaded. +func (r RelatedVideoFiles) PrimaryLoaded() bool { + return r.primaryLoaded +} + +// List returns the related files. Panics if the relationship has not been loaded.
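RelatedIDs distinguishes "not loaded" (nil backing slice) from "loaded but empty" (non-nil empty slice), and its accessors enforce that distinction by panicking. A minimal sketch:

s := Scene{GalleryIDs: NewRelatedIDs([]int{1, 2})}
_ = s.GalleryIDs.Loaded() // true: built from a non-nil slice
_ = s.GalleryIDs.List()   // [1 2]
s.GalleryIDs.Add(3)       // allowed once loaded

var unloaded RelatedIDs
_ = unloaded.Loaded() // false; List or Add here would panic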
+func (r RelatedVideoFiles) List() []*file.VideoFile { + if !r.Loaded() { + panic("relationship has not been loaded") + } + + return r.files +} + +// Primary returns the primary file. Panics if the relationship has not been loaded. +func (r RelatedVideoFiles) Primary() *file.VideoFile { + if !r.PrimaryLoaded() { + panic("relationship has not been loaded") + } + + return r.primaryFile +} + +func (r *RelatedVideoFiles) load(fn func() ([]*file.VideoFile, error)) error { + if r.Loaded() { + return nil + } + + var err error + r.files, err = fn() + if err != nil { + return err + } + + if len(r.files) > 0 { + r.primaryFile = r.files[0] + } + + r.primaryLoaded = true + + return nil +} + +func (r *RelatedVideoFiles) loadPrimary(fn func() (*file.VideoFile, error)) error { + if r.PrimaryLoaded() { + return nil + } + + var err error + r.primaryFile, err = fn() + if err != nil { + return err + } + + r.primaryLoaded = true + + return nil +} + +type RelatedImageFiles struct { + primaryFile *file.ImageFile + files []*file.ImageFile + primaryLoaded bool +} + +func NewRelatedImageFiles(files []*file.ImageFile) RelatedImageFiles { + ret := RelatedImageFiles{ + files: files, + primaryLoaded: true, + } + + if len(files) > 0 { + ret.primaryFile = files[0] + } + + return ret +} + +// Loaded returns true if the relationship has been loaded. +func (r RelatedImageFiles) Loaded() bool { + return r.files != nil +} + +// PrimaryLoaded returns true if the primary file relationship has been loaded. +func (r RelatedImageFiles) PrimaryLoaded() bool { + return r.primaryLoaded +} + +// List returns the related files. Panics if the relationship has not been loaded. +func (r RelatedImageFiles) List() []*file.ImageFile { + if !r.Loaded() { + panic("relationship has not been loaded") + } + + return r.files +} + +// Primary returns the primary file. Panics if the relationship has not been loaded. +func (r RelatedImageFiles) Primary() *file.ImageFile { + if !r.PrimaryLoaded() { + panic("relationship has not been loaded") + } + + return r.primaryFile +} + +func (r *RelatedImageFiles) load(fn func() ([]*file.ImageFile, error)) error { + if r.Loaded() { + return nil + } + + var err error + r.files, err = fn() + if err != nil { + return err + } + + if len(r.files) > 0 { + r.primaryFile = r.files[0] + } + + r.primaryLoaded = true + + return nil +} + +func (r *RelatedImageFiles) loadPrimary(fn func() (*file.ImageFile, error)) error { + if r.PrimaryLoaded() { + return nil + } + + var err error + r.primaryFile, err = fn() + if err != nil { + return err + } + + r.primaryLoaded = true + + return nil +} + +type RelatedFiles struct { + primaryFile file.File + files []file.File + primaryLoaded bool +} + +func NewRelatedFiles(files []file.File) RelatedFiles { + ret := RelatedFiles{ + files: files, + primaryLoaded: true, + } + + if len(files) > 0 { + ret.primaryFile = files[0] + } + + return ret +} + +// Loaded returns true if the relationship has been loaded. +func (r RelatedFiles) Loaded() bool { + return r.files != nil +} + +// PrimaryLoaded returns true if the primary file relationship has been loaded. +func (r RelatedFiles) PrimaryLoaded() bool { + return r.primaryLoaded +} + +// List returns the related files. Panics if the relationship has not been loaded. +func (r RelatedFiles) List() []file.File { + if !r.Loaded() { + panic("relationship has not been loaded") + } + + return r.files +} + +// Primary returns the primary file. Panics if the relationship has not been loaded.
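Primary-file access follows the same load-then-read discipline, with the extra wrinkle that a loaded primary can still be nil when the scene has no files. A sketch, assuming a hypothetical file.Finder value finder:

if err := scene.LoadPrimaryFile(ctx, finder); err != nil {
	return err
}
if f := scene.Files.Primary(); f != nil {
	// f is a *file.VideoFile; per-file metadata such as dimensions
	// presumably now lives here rather than on the Scene row.
	_ = f
}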
+func (r RelatedFiles) Primary() file.File { + if !r.PrimaryLoaded() { + panic("relationship has not been loaded") + } + + return r.primaryFile +} + +func (r *RelatedFiles) load(fn func() ([]file.File, error)) error { + if r.Loaded() { + return nil + } + + var err error + r.files, err = fn() + if err != nil { + return err + } + + if len(r.files) > 0 { + r.primaryFile = r.files[0] + } + + r.primaryLoaded = true + + return nil +} + +func (r *RelatedFiles) loadPrimary(fn func() (file.File, error)) error { + if r.PrimaryLoaded() { + return nil + } + + var err error + r.primaryFile, err = fn() + if err != nil { + return err + } + + r.primaryLoaded = true + + return nil +} diff --git a/pkg/models/repository.go b/pkg/models/repository.go index 2686d7c3a..45d6c0357 100644 --- a/pkg/models/repository.go +++ b/pkg/models/repository.go @@ -1,27 +1,35 @@ package models -type Repository interface { - Gallery() GalleryReaderWriter - Image() ImageReaderWriter - Movie() MovieReaderWriter - Performer() PerformerReaderWriter - Scene() SceneReaderWriter - SceneMarker() SceneMarkerReaderWriter - ScrapedItem() ScrapedItemReaderWriter - Studio() StudioReaderWriter - Tag() TagReaderWriter - SavedFilter() SavedFilterReaderWriter +import ( + "context" + + "github.com/stashapp/stash/pkg/file" + "github.com/stashapp/stash/pkg/txn" +) + +type TxnManager interface { + txn.Manager + txn.DatabaseProvider + Reset() error } -type ReaderRepository interface { - Gallery() GalleryReader - Image() ImageReader - Movie() MovieReader - Performer() PerformerReader - Scene() SceneReader - SceneMarker() SceneMarkerReader - ScrapedItem() ScrapedItemReader - Studio() StudioReader - Tag() TagReader - SavedFilter() SavedFilterReader +type Repository struct { + TxnManager + + File file.Store + Folder file.FolderStore + Gallery GalleryReaderWriter + Image ImageReaderWriter + Movie MovieReaderWriter + Performer PerformerReaderWriter + Scene SceneReaderWriter + SceneMarker SceneMarkerReaderWriter + ScrapedItem ScrapedItemReaderWriter + Studio StudioReaderWriter + Tag TagReaderWriter + SavedFilter SavedFilterReaderWriter +} + +func (r *Repository) WithTxn(ctx context.Context, fn txn.TxnFunc) error { + return txn.WithTxn(ctx, r, fn) } diff --git a/pkg/models/resolution.go b/pkg/models/resolution.go new file mode 100644 index 000000000..6b955797a --- /dev/null +++ b/pkg/models/resolution.go @@ -0,0 +1,183 @@ +package models + +import ( + "fmt" + "io" + "strconv" +) + +type ResolutionRange struct { + min, max int +} + +var resolutionRanges = map[ResolutionEnum]ResolutionRange{ + ResolutionEnum("VERY_LOW"): {144, 239}, + ResolutionEnum("LOW"): {240, 359}, + ResolutionEnum("R360P"): {360, 479}, + ResolutionEnum("STANDARD"): {480, 539}, + ResolutionEnum("WEB_HD"): {540, 719}, + ResolutionEnum("STANDARD_HD"): {720, 1079}, + ResolutionEnum("FULL_HD"): {1080, 1439}, + ResolutionEnum("QUAD_HD"): {1440, 1919}, + ResolutionEnum("VR_HD"): {1920, 2159}, + ResolutionEnum("FOUR_K"): {2160, 2879}, + ResolutionEnum("FIVE_K"): {2880, 3383}, + ResolutionEnum("SIX_K"): {3384, 4319}, + ResolutionEnum("EIGHT_K"): {4320, 8639}, +} + +type ResolutionEnum string + +const ( + // 144p + ResolutionEnumVeryLow ResolutionEnum = "VERY_LOW" + // 240p + ResolutionEnumLow ResolutionEnum = "LOW" + // 360p + ResolutionEnumR360p ResolutionEnum = "R360P" + // 480p + ResolutionEnumStandard ResolutionEnum = "STANDARD" + // 540p + ResolutionEnumWebHd ResolutionEnum = "WEB_HD" + // 720p + ResolutionEnumStandardHd ResolutionEnum = "STANDARD_HD" + // 1080p + ResolutionEnumFullHd 
ResolutionEnum = "FULL_HD" + // 1440p + ResolutionEnumQuadHd ResolutionEnum = "QUAD_HD" + // 1920p + ResolutionEnumVrHd ResolutionEnum = "VR_HD" + // 4k + ResolutionEnumFourK ResolutionEnum = "FOUR_K" + // 5k + ResolutionEnumFiveK ResolutionEnum = "FIVE_K" + // 6k + ResolutionEnumSixK ResolutionEnum = "SIX_K" + // 8k + ResolutionEnumEightK ResolutionEnum = "EIGHT_K" +) + +var AllResolutionEnum = []ResolutionEnum{ + ResolutionEnumVeryLow, + ResolutionEnumLow, + ResolutionEnumR360p, + ResolutionEnumStandard, + ResolutionEnumWebHd, + ResolutionEnumStandardHd, + ResolutionEnumFullHd, + ResolutionEnumQuadHd, + ResolutionEnumVrHd, + ResolutionEnumFourK, + ResolutionEnumFiveK, + ResolutionEnumSixK, + ResolutionEnumEightK, +} + +func (e ResolutionEnum) IsValid() bool { + switch e { + case ResolutionEnumVeryLow, ResolutionEnumLow, ResolutionEnumR360p, ResolutionEnumStandard, ResolutionEnumWebHd, ResolutionEnumStandardHd, ResolutionEnumFullHd, ResolutionEnumQuadHd, ResolutionEnumVrHd, ResolutionEnumFourK, ResolutionEnumFiveK, ResolutionEnumSixK, ResolutionEnumEightK: + return true + } + return false +} + +func (e ResolutionEnum) String() string { + return string(e) +} + +func (e *ResolutionEnum) UnmarshalGQL(v interface{}) error { + str, ok := v.(string) + if !ok { + return fmt.Errorf("enums must be strings") + } + + *e = ResolutionEnum(str) + if !e.IsValid() { + return fmt.Errorf("%s is not a valid ResolutionEnum", str) + } + return nil +} + +func (e ResolutionEnum) MarshalGQL(w io.Writer) { + fmt.Fprint(w, strconv.Quote(e.String())) +} + +// GetMaxResolution returns the maximum width or height that media must be +// to qualify as this resolution. +func (e *ResolutionEnum) GetMaxResolution() int { + return resolutionRanges[*e].max +} + +// GetMinResolution returns the minimum width or height that media must be +// to qualify as this resolution. 
+func (e *ResolutionEnum) GetMinResolution() int { + return resolutionRanges[*e].min +} + +type StreamingResolutionEnum string + +const ( + // 240p + StreamingResolutionEnumLow StreamingResolutionEnum = "LOW" + // 480p + StreamingResolutionEnumStandard StreamingResolutionEnum = "STANDARD" + // 720p + StreamingResolutionEnumStandardHd StreamingResolutionEnum = "STANDARD_HD" + // 1080p + StreamingResolutionEnumFullHd StreamingResolutionEnum = "FULL_HD" + // 4k + StreamingResolutionEnumFourK StreamingResolutionEnum = "FOUR_K" + // Original + StreamingResolutionEnumOriginal StreamingResolutionEnum = "ORIGINAL" +) + +var AllStreamingResolutionEnum = []StreamingResolutionEnum{ + StreamingResolutionEnumLow, + StreamingResolutionEnumStandard, + StreamingResolutionEnumStandardHd, + StreamingResolutionEnumFullHd, + StreamingResolutionEnumFourK, + StreamingResolutionEnumOriginal, +} + +func (e StreamingResolutionEnum) IsValid() bool { + switch e { + case StreamingResolutionEnumLow, StreamingResolutionEnumStandard, StreamingResolutionEnumStandardHd, StreamingResolutionEnumFullHd, StreamingResolutionEnumFourK, StreamingResolutionEnumOriginal: + return true + } + return false +} + +func (e StreamingResolutionEnum) String() string { + return string(e) +} + +func (e *StreamingResolutionEnum) UnmarshalGQL(v interface{}) error { + str, ok := v.(string) + if !ok { + return fmt.Errorf("enums must be strings") + } + + *e = StreamingResolutionEnum(str) + if !e.IsValid() { + return fmt.Errorf("%s is not a valid StreamingResolutionEnum", str) + } + return nil +} + +func (e StreamingResolutionEnum) MarshalGQL(w io.Writer) { + fmt.Fprint(w, strconv.Quote(e.String())) +} + +var streamingResolutionMax = map[StreamingResolutionEnum]int{ + StreamingResolutionEnumLow: resolutionRanges[ResolutionEnumLow].min, + StreamingResolutionEnumStandard: resolutionRanges[ResolutionEnumStandard].min, + StreamingResolutionEnumStandardHd: resolutionRanges[ResolutionEnumStandardHd].min, + StreamingResolutionEnumFullHd: resolutionRanges[ResolutionEnumFullHd].min, + StreamingResolutionEnumFourK: resolutionRanges[ResolutionEnumFourK].min, + StreamingResolutionEnumOriginal: 0, +} + +func (e StreamingResolutionEnum) GetMaxResolution() int { + return streamingResolutionMax[e] +} diff --git a/pkg/models/saved_filter.go b/pkg/models/saved_filter.go index e6cd2f8e0..10dd4af36 100644 --- a/pkg/models/saved_filter.go +++ b/pkg/models/saved_filter.go @@ -1,18 +1,20 @@ package models +import "context" + type SavedFilterReader interface { - All() ([]*SavedFilter, error) - Find(id int) (*SavedFilter, error) - FindMany(ids []int, ignoreNotFound bool) ([]*SavedFilter, error) - FindByMode(mode FilterMode) ([]*SavedFilter, error) - FindDefault(mode FilterMode) (*SavedFilter, error) + All(ctx context.Context) ([]*SavedFilter, error) + Find(ctx context.Context, id int) (*SavedFilter, error) + FindMany(ctx context.Context, ids []int, ignoreNotFound bool) ([]*SavedFilter, error) + FindByMode(ctx context.Context, mode FilterMode) ([]*SavedFilter, error) + FindDefault(ctx context.Context, mode FilterMode) (*SavedFilter, error) } type SavedFilterWriter interface { - Create(obj SavedFilter) (*SavedFilter, error) - Update(obj SavedFilter) (*SavedFilter, error) - SetDefault(obj SavedFilter) (*SavedFilter, error) - Destroy(id int) error + Create(ctx context.Context, obj SavedFilter) (*SavedFilter, error) + Update(ctx context.Context, obj SavedFilter) (*SavedFilter, error) + SetDefault(ctx context.Context, obj SavedFilter) (*SavedFilter, error) + Destroy(ctx 
context.Context, id int) error } type SavedFilterReaderWriter interface { diff --git a/pkg/models/scene.go b/pkg/models/scene.go index 86a131a0f..e9f7a554b 100644 --- a/pkg/models/scene.go +++ b/pkg/models/scene.go @@ -1,5 +1,79 @@ package models +import ( + "context" + + "github.com/stashapp/stash/pkg/file" +) + +type PHashDuplicationCriterionInput struct { + Duplicated *bool `json:"duplicated"` + // Currently unimplemented + Distance *int `json:"distance"` +} + +type SceneFilterType struct { + And *SceneFilterType `json:"AND"` + Or *SceneFilterType `json:"OR"` + Not *SceneFilterType `json:"NOT"` + Title *StringCriterionInput `json:"title"` + Details *StringCriterionInput `json:"details"` + // Filter by file oshash + Oshash *StringCriterionInput `json:"oshash"` + // Filter by file checksum + Checksum *StringCriterionInput `json:"checksum"` + // Filter by file phash + Phash *StringCriterionInput `json:"phash"` + // Filter by path + Path *StringCriterionInput `json:"path"` + // Filter by file count + FileCount *IntCriterionInput `json:"file_count"` + // Filter by rating + Rating *IntCriterionInput `json:"rating"` + // Filter by organized + Organized *bool `json:"organized"` + // Filter by o-counter + OCounter *IntCriterionInput `json:"o_counter"` + // Filter Scenes that have an exact phash match available + Duplicated *PHashDuplicationCriterionInput `json:"duplicated"` + // Filter by resolution + Resolution *ResolutionCriterionInput `json:"resolution"` + // Filter by duration (in seconds) + Duration *IntCriterionInput `json:"duration"` + // Filter to only include scenes which have markers. `true` or `false` + HasMarkers *string `json:"has_markers"` + // Filter to only include scenes missing this property + IsMissing *string `json:"is_missing"` + // Filter to only include scenes with this studio + Studios *HierarchicalMultiCriterionInput `json:"studios"` + // Filter to only include scenes with this movie + Movies *MultiCriterionInput `json:"movies"` + // Filter to only include scenes with these tags + Tags *HierarchicalMultiCriterionInput `json:"tags"` + // Filter by tag count + TagCount *IntCriterionInput `json:"tag_count"` + // Filter to only include scenes with performers with these tags + PerformerTags *HierarchicalMultiCriterionInput `json:"performer_tags"` + // Filter scenes that have performers that have been favorited + PerformerFavorite *bool `json:"performer_favorite"` + // Filter scenes by performer age at time of scene + PerformerAge *IntCriterionInput `json:"performer_age"` + // Filter to only include scenes with these performers + Performers *MultiCriterionInput `json:"performers"` + // Filter by performer count + PerformerCount *IntCriterionInput `json:"performer_count"` + // Filter by StashID + StashID *StringCriterionInput `json:"stash_id"` + // Filter by url + URL *StringCriterionInput `json:"url"` + // Filter by interactive + Interactive *bool `json:"interactive"` + // Filter by InteractiveSpeed + InteractiveSpeed *IntCriterionInput `json:"interactive_speed"` + + Captions *StringCriterionInput `json:"captions"` +} + type SceneQueryOptions struct { QueryOptions SceneFilter *SceneFilterType @@ -18,76 +92,83 @@ type SceneQueryResult struct { resolveErr error } +type SceneDestroyInput struct { + ID string `json:"id"` + DeleteFile *bool `json:"delete_file"` + DeleteGenerated *bool `json:"delete_generated"` +} + +type ScenesDestroyInput struct { + Ids []string `json:"ids"` + DeleteFile *bool `json:"delete_file"` + DeleteGenerated *bool `json:"delete_generated"` +} + func 
NewSceneQueryResult(finder SceneFinder) *SceneQueryResult { return &SceneQueryResult{ finder: finder, } } -func (r *SceneQueryResult) Resolve() ([]*Scene, error) { +func (r *SceneQueryResult) Resolve(ctx context.Context) ([]*Scene, error) { // cache results if r.scenes == nil && r.resolveErr == nil { - r.scenes, r.resolveErr = r.finder.FindMany(r.IDs) + r.scenes, r.resolveErr = r.finder.FindMany(ctx, r.IDs) } return r.scenes, r.resolveErr } type SceneFinder interface { // TODO - rename this to Find and remove existing method - FindMany(ids []int) ([]*Scene, error) + FindMany(ctx context.Context, ids []int) ([]*Scene, error) } type SceneReader interface { SceneFinder // TODO - remove this in another PR - Find(id int) (*Scene, error) - FindByChecksum(checksum string) (*Scene, error) - FindByOSHash(oshash string) (*Scene, error) - FindByPath(path string) (*Scene, error) - FindByPerformerID(performerID int) ([]*Scene, error) - FindByGalleryID(performerID int) ([]*Scene, error) - FindDuplicates(distance int) ([][]*Scene, error) - CountByPerformerID(performerID int) (int, error) + Find(ctx context.Context, id int) (*Scene, error) + FindByChecksum(ctx context.Context, checksum string) ([]*Scene, error) + FindByOSHash(ctx context.Context, oshash string) ([]*Scene, error) + FindByPath(ctx context.Context, path string) ([]*Scene, error) + FindByPerformerID(ctx context.Context, performerID int) ([]*Scene, error) + FindByGalleryID(ctx context.Context, performerID int) ([]*Scene, error) + FindDuplicates(ctx context.Context, distance int) ([][]*Scene, error) + + GalleryIDLoader + PerformerIDLoader + TagIDLoader + SceneMovieLoader + StashIDLoader + VideoFileLoader + + CountByPerformerID(ctx context.Context, performerID int) (int, error) // FindByStudioID(studioID int) ([]*Scene, error) - FindByMovieID(movieID int) ([]*Scene, error) - CountByMovieID(movieID int) (int, error) - Count() (int, error) - Size() (float64, error) - Duration() (float64, error) + FindByMovieID(ctx context.Context, movieID int) ([]*Scene, error) + CountByMovieID(ctx context.Context, movieID int) (int, error) + Count(ctx context.Context) (int, error) + Size(ctx context.Context) (float64, error) + Duration(ctx context.Context) (float64, error) // SizeCount() (string, error) - CountByStudioID(studioID int) (int, error) - CountByTagID(tagID int) (int, error) - CountMissingChecksum() (int, error) - CountMissingOSHash() (int, error) - Wall(q *string) ([]*Scene, error) - All() ([]*Scene, error) - Query(options SceneQueryOptions) (*SceneQueryResult, error) - GetCaptions(sceneID int) ([]*SceneCaption, error) - GetCover(sceneID int) ([]byte, error) - GetMovies(sceneID int) ([]MoviesScenes, error) - GetTagIDs(sceneID int) ([]int, error) - GetGalleryIDs(sceneID int) ([]int, error) - GetPerformerIDs(sceneID int) ([]int, error) - GetStashIDs(sceneID int) ([]*StashID, error) + CountByStudioID(ctx context.Context, studioID int) (int, error) + CountByTagID(ctx context.Context, tagID int) (int, error) + CountMissingChecksum(ctx context.Context) (int, error) + CountMissingOSHash(ctx context.Context) (int, error) + Wall(ctx context.Context, q *string) ([]*Scene, error) + All(ctx context.Context) ([]*Scene, error) + Query(ctx context.Context, options SceneQueryOptions) (*SceneQueryResult, error) + GetCover(ctx context.Context, sceneID int) ([]byte, error) } type SceneWriter interface { - Create(newScene Scene) (*Scene, error) - Update(updatedScene ScenePartial) (*Scene, error) - UpdateFull(updatedScene Scene) (*Scene, error) - IncrementOCounter(id int) 
(int, error) - DecrementOCounter(id int) (int, error) - ResetOCounter(id int) (int, error) - UpdateFileModTime(id int, modTime NullSQLiteTimestamp) error - Destroy(id int) error - UpdateCaptions(id int, captions []*SceneCaption) error - UpdateCover(sceneID int, cover []byte) error - DestroyCover(sceneID int) error - UpdatePerformers(sceneID int, performerIDs []int) error - UpdateTags(sceneID int, tagIDs []int) error - UpdateGalleries(sceneID int, galleryIDs []int) error - UpdateMovies(sceneID int, movies []MoviesScenes) error - UpdateStashIDs(sceneID int, stashIDs []StashID) error + Create(ctx context.Context, newScene *Scene, fileIDs []file.ID) error + Update(ctx context.Context, updatedScene *Scene) error + UpdatePartial(ctx context.Context, id int, updatedScene ScenePartial) (*Scene, error) + IncrementOCounter(ctx context.Context, id int) (int, error) + DecrementOCounter(ctx context.Context, id int) (int, error) + ResetOCounter(ctx context.Context, id int) (int, error) + Destroy(ctx context.Context, id int) error + UpdateCover(ctx context.Context, sceneID int, cover []byte) error + DestroyCover(ctx context.Context, sceneID int) error } type SceneReaderWriter interface { diff --git a/pkg/models/scene_marker.go b/pkg/models/scene_marker.go index 49f169098..dd0b786f6 100644 --- a/pkg/models/scene_marker.go +++ b/pkg/models/scene_marker.go @@ -1,21 +1,40 @@ package models +import "context" + +type SceneMarkerFilterType struct { + // Filter to only include scene markers with this tag + TagID *string `json:"tag_id"` + // Filter to only include scene markers with these tags + Tags *HierarchicalMultiCriterionInput `json:"tags"` + // Filter to only include scene markers attached to a scene with these tags + SceneTags *HierarchicalMultiCriterionInput `json:"scene_tags"` + // Filter to only include scene markers with these performers + Performers *MultiCriterionInput `json:"performers"` +} + +type MarkerStringsResultType struct { + Count int `json:"count"` + ID string `json:"id"` + Title string `json:"title"` +} + type SceneMarkerReader interface { - Find(id int) (*SceneMarker, error) - FindMany(ids []int) ([]*SceneMarker, error) - FindBySceneID(sceneID int) ([]*SceneMarker, error) - CountByTagID(tagID int) (int, error) - GetMarkerStrings(q *string, sort *string) ([]*MarkerStringsResultType, error) - Wall(q *string) ([]*SceneMarker, error) - Query(sceneMarkerFilter *SceneMarkerFilterType, findFilter *FindFilterType) ([]*SceneMarker, int, error) - GetTagIDs(imageID int) ([]int, error) + Find(ctx context.Context, id int) (*SceneMarker, error) + FindMany(ctx context.Context, ids []int) ([]*SceneMarker, error) + FindBySceneID(ctx context.Context, sceneID int) ([]*SceneMarker, error) + CountByTagID(ctx context.Context, tagID int) (int, error) + GetMarkerStrings(ctx context.Context, q *string, sort *string) ([]*MarkerStringsResultType, error) + Wall(ctx context.Context, q *string) ([]*SceneMarker, error) + Query(ctx context.Context, sceneMarkerFilter *SceneMarkerFilterType, findFilter *FindFilterType) ([]*SceneMarker, int, error) + GetTagIDs(ctx context.Context, imageID int) ([]int, error) } type SceneMarkerWriter interface { - Create(newSceneMarker SceneMarker) (*SceneMarker, error) - Update(updatedSceneMarker SceneMarker) (*SceneMarker, error) - Destroy(id int) error - UpdateTags(markerID int, tagIDs []int) error + Create(ctx context.Context, newSceneMarker SceneMarker) (*SceneMarker, error) + Update(ctx context.Context, updatedSceneMarker SceneMarker) (*SceneMarker, error) + Destroy(ctx 
context.Context, id int) error + UpdateTags(ctx context.Context, markerID int, tagIDs []int) error } type SceneMarkerReaderWriter interface { diff --git a/pkg/models/scraped.go b/pkg/models/scraped.go index f57a8409a..be424147b 100644 --- a/pkg/models/scraped.go +++ b/pkg/models/scraped.go @@ -1,15 +1,18 @@ package models -import "errors" +import ( + "context" + "errors" +) var ErrScraperSource = errors.New("invalid ScraperSource") type ScrapedItemReader interface { - All() ([]*ScrapedItem, error) + All(ctx context.Context) ([]*ScrapedItem, error) } type ScrapedItemWriter interface { - Create(newObject ScrapedItem) (*ScrapedItem, error) + Create(ctx context.Context, newObject ScrapedItem) (*ScrapedItem, error) } type ScrapedItemReaderWriter interface { diff --git a/pkg/models/sql.go b/pkg/models/sql.go index f4960d84b..c82f7004a 100644 --- a/pkg/models/sql.go +++ b/pkg/models/sql.go @@ -2,7 +2,6 @@ package models import ( "database/sql" - "strconv" ) func NullString(v string) sql.NullString { @@ -12,43 +11,9 @@ func NullString(v string) sql.NullString { } } -func NullStringPtr(v string) *sql.NullString { - return &sql.NullString{ - String: v, - Valid: true, - } -} - func NullInt64(v int64) sql.NullInt64 { return sql.NullInt64{ Int64: v, Valid: true, } } - -func nullStringPtrToStringPtr(v *sql.NullString) *string { - if v == nil || !v.Valid { - return nil - } - - vv := v.String - return &vv -} - -func nullInt64PtrToIntPtr(v *sql.NullInt64) *int { - if v == nil || !v.Valid { - return nil - } - - vv := int(v.Int64) - return &vv -} - -func nullInt64PtrToStringPtr(v *sql.NullInt64) *string { - if v == nil || !v.Valid { - return nil - } - - vv := strconv.FormatInt(v.Int64, 10) - return &vv -} diff --git a/pkg/models/sqlite_date.go b/pkg/models/sqlite_date.go index 192f7e750..93d3f7963 100644 --- a/pkg/models/sqlite_date.go +++ b/pkg/models/sqlite_date.go @@ -9,11 +9,14 @@ import ( "github.com/stashapp/stash/pkg/utils" ) +// TODO - this should be moved to sqlite type SQLiteDate struct { String string Valid bool } +const sqliteDateLayout = "2006-01-02" + // Scan implements the Scanner interface. 
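Returning to the scene.go changes above: with context threaded through every reader, query resolution is deferred and cached on SceneQueryResult rather than performed eagerly. A minimal sketch, assuming a hypothetical SceneReader value reader:

result, err := reader.Query(ctx, SceneQueryOptions{ /* filter options */ })
if err != nil {
	return err
}
// Resolve now requires the context; repeat calls reuse the cached scenes.
scenes, err := result.Resolve(ctx)
if err != nil {
	return err
}
_ = scenes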
func (t *SQLiteDate) Scan(value interface{}) error { dateTime, ok := value.(time.Time) @@ -23,7 +26,7 @@ func (t *SQLiteDate) Scan(value interface{}) error { return nil } - t.String = dateTime.Format("2006-01-02") + t.String = dateTime.Format(sqliteDateLayout) if t.String != "" && t.String != "0001-01-01" { t.Valid = true } else { @@ -44,7 +47,7 @@ func (t SQLiteDate) Value() (driver.Value, error) { return "", nil } - result, err := utils.ParseDateStringAsFormat(s, "2006-01-02") + result, err := utils.ParseDateStringAsFormat(s, sqliteDateLayout) if err != nil { return nil, fmt.Errorf("converting sqlite date %q: %w", s, err) } @@ -59,3 +62,21 @@ func (t *SQLiteDate) StringPtr() *string { vv := t.String return &vv } + +func (t *SQLiteDate) TimePtr() *time.Time { + if t == nil || !t.Valid { + return nil + } + + ret, _ := time.Parse(sqliteDateLayout, t.String) + return &ret +} + +func (t *SQLiteDate) DatePtr() *Date { + if t == nil || !t.Valid { + return nil + } + + ret := NewDate(t.String) + return &ret +} diff --git a/pkg/models/stash_box.go b/pkg/models/stash_box.go index 3e981484b..9c9d7ed2e 100644 --- a/pkg/models/stash_box.go +++ b/pkg/models/stash_box.go @@ -1,39 +1,13 @@ package models -import ( - "fmt" - "strings" -) - -type StashBoxes []*StashBox - -func (sb StashBoxes) ResolveStashBox(source ScraperSourceInput) (*StashBox, error) { - if source.StashBoxIndex != nil { - index := source.StashBoxIndex - if *index < 0 || *index >= len(sb) { - return nil, fmt.Errorf("%w: invalid stash_box_index: %d", ErrScraperSource, index) - } - - return sb[*index], nil - } - - if source.StashBoxEndpoint != nil { - var ret *StashBox - endpoint := *source.StashBoxEndpoint - for _, b := range sb { - if strings.EqualFold(endpoint, b.Endpoint) { - ret = b - } - } - - if ret == nil { - return nil, fmt.Errorf(`%w: stash-box with endpoint "%s"`, ErrNotFound, endpoint) - } - - return ret, nil - } - - // neither stash-box inputs were provided, so assume it is a scraper - - return nil, nil +type StashBoxFingerprint struct { + Algorithm string `json:"algorithm"` + Hash string `json:"hash"` + Duration int `json:"duration"` +} + +type StashBox struct { + Endpoint string `json:"endpoint"` + APIKey string `json:"api_key"` + Name string `json:"name"` } diff --git a/pkg/models/stash_ids.go b/pkg/models/stash_ids.go index fb20522a5..448491e18 100644 --- a/pkg/models/stash_ids.go +++ b/pkg/models/stash_ids.go @@ -1,14 +1,11 @@ package models -func StashIDsFromInput(i []*StashIDInput) []StashID { - var ret []StashID - for _, stashID := range i { - newJoin := StashID{ - StashID: stashID.StashID, - Endpoint: stashID.Endpoint, - } - ret = append(ret, newJoin) - } - - return ret +type StashID struct { + StashID string `db:"stash_id" json:"stash_id"` + Endpoint string `db:"endpoint" json:"endpoint"` +} + +type UpdateStashIDs struct { + StashIDs []StashID `json:"stash_ids"` + Mode RelationshipUpdateMode `json:"mode"` } diff --git a/pkg/models/studio.go b/pkg/models/studio.go index e5d6bfb19..50f8c12b4 100644 --- a/pkg/models/studio.go +++ b/pkg/models/studio.go @@ -1,32 +1,66 @@ package models +import "context" + +type StudioFilterType struct { + And *StudioFilterType `json:"AND"` + Or *StudioFilterType `json:"OR"` + Not *StudioFilterType `json:"NOT"` + Name *StringCriterionInput `json:"name"` + Details *StringCriterionInput `json:"details"` + // Filter to only include studios with this parent studio + Parents *MultiCriterionInput `json:"parents"` + // Filter by StashID + StashID *StringCriterionInput `json:"stash_id"` + // 
Filter to only include studios missing this property + IsMissing *string `json:"is_missing"` + // Filter by rating + Rating *IntCriterionInput `json:"rating"` + // Filter by scene count + SceneCount *IntCriterionInput `json:"scene_count"` + // Filter by image count + ImageCount *IntCriterionInput `json:"image_count"` + // Filter by gallery count + GalleryCount *IntCriterionInput `json:"gallery_count"` + // Filter by url + URL *StringCriterionInput `json:"url"` + // Filter by studio aliases + Aliases *StringCriterionInput `json:"aliases"` + // Filter by autotag ignore value + IgnoreAutoTag *bool `json:"ignore_auto_tag"` +} + +type StudioFinder interface { + FindMany(ctx context.Context, ids []int) ([]*Studio, error) +} + type StudioReader interface { - Find(id int) (*Studio, error) - FindMany(ids []int) ([]*Studio, error) - FindChildren(id int) ([]*Studio, error) - FindByName(name string, nocase bool) (*Studio, error) - FindByStashID(stashID StashID) ([]*Studio, error) - Count() (int, error) - All() ([]*Studio, error) + Find(ctx context.Context, id int) (*Studio, error) + StudioFinder + FindChildren(ctx context.Context, id int) ([]*Studio, error) + FindByName(ctx context.Context, name string, nocase bool) (*Studio, error) + FindByStashID(ctx context.Context, stashID StashID) ([]*Studio, error) + Count(ctx context.Context) (int, error) + All(ctx context.Context) ([]*Studio, error) // TODO - this interface is temporary until the filter schema can fully // support the query needed - QueryForAutoTag(words []string) ([]*Studio, error) - Query(studioFilter *StudioFilterType, findFilter *FindFilterType) ([]*Studio, int, error) - GetImage(studioID int) ([]byte, error) - HasImage(studioID int) (bool, error) - GetStashIDs(studioID int) ([]*StashID, error) - GetAliases(studioID int) ([]string, error) + QueryForAutoTag(ctx context.Context, words []string) ([]*Studio, error) + Query(ctx context.Context, studioFilter *StudioFilterType, findFilter *FindFilterType) ([]*Studio, int, error) + GetImage(ctx context.Context, studioID int) ([]byte, error) + HasImage(ctx context.Context, studioID int) (bool, error) + StashIDLoader + GetAliases(ctx context.Context, studioID int) ([]string, error) } type StudioWriter interface { - Create(newStudio Studio) (*Studio, error) - Update(updatedStudio StudioPartial) (*Studio, error) - UpdateFull(updatedStudio Studio) (*Studio, error) - Destroy(id int) error - UpdateImage(studioID int, image []byte) error - DestroyImage(studioID int) error - UpdateStashIDs(studioID int, stashIDs []StashID) error - UpdateAliases(studioID int, aliases []string) error + Create(ctx context.Context, newStudio Studio) (*Studio, error) + Update(ctx context.Context, updatedStudio StudioPartial) (*Studio, error) + UpdateFull(ctx context.Context, updatedStudio Studio) (*Studio, error) + Destroy(ctx context.Context, id int) error + UpdateImage(ctx context.Context, studioID int, image []byte) error + DestroyImage(ctx context.Context, studioID int) error + UpdateStashIDs(ctx context.Context, studioID int, stashIDs []StashID) error + UpdateAliases(ctx context.Context, studioID int, aliases []string) error } type StudioReaderWriter interface { diff --git a/pkg/models/tag.go b/pkg/models/tag.go index 747e7a08e..57b9f55d5 100644 --- a/pkg/models/tag.go +++ b/pkg/models/tag.go @@ -1,40 +1,78 @@ package models +import "context" + +type TagFilterType struct { + And *TagFilterType `json:"AND"` + Or *TagFilterType `json:"OR"` + Not *TagFilterType `json:"NOT"` + // Filter by tag name + Name *StringCriterionInput 
`json:"name"` + // Filter by tag aliases + Aliases *StringCriterionInput `json:"aliases"` + // Filter to only include tags missing this property + IsMissing *string `json:"is_missing"` + // Filter by number of scenes with this tag + SceneCount *IntCriterionInput `json:"scene_count"` + // Filter by number of images with this tag + ImageCount *IntCriterionInput `json:"image_count"` + // Filter by number of galleries with this tag + GalleryCount *IntCriterionInput `json:"gallery_count"` + // Filter by number of performers with this tag + PerformerCount *IntCriterionInput `json:"performer_count"` + // Filter by number of markers with this tag + MarkerCount *IntCriterionInput `json:"marker_count"` + // Filter by parent tags + Parents *HierarchicalMultiCriterionInput `json:"parents"` + // Filter by child tags + Children *HierarchicalMultiCriterionInput `json:"children"` + // Filter by number of parent tags the tag has + ParentCount *IntCriterionInput `json:"parent_count"` + // Filter by number of child tags the tag has + ChildCount *IntCriterionInput `json:"child_count"` + // Filter by autotag ignore value + IgnoreAutoTag *bool `json:"ignore_auto_tag"` +} + +type TagFinder interface { + FindMany(ctx context.Context, ids []int) ([]*Tag, error) +} + type TagReader interface { - Find(id int) (*Tag, error) - FindMany(ids []int) ([]*Tag, error) - FindBySceneID(sceneID int) ([]*Tag, error) - FindByPerformerID(performerID int) ([]*Tag, error) - FindBySceneMarkerID(sceneMarkerID int) ([]*Tag, error) - FindByImageID(imageID int) ([]*Tag, error) - FindByGalleryID(galleryID int) ([]*Tag, error) - FindByName(name string, nocase bool) (*Tag, error) - FindByNames(names []string, nocase bool) ([]*Tag, error) - FindByParentTagID(parentID int) ([]*Tag, error) - FindByChildTagID(childID int) ([]*Tag, error) - Count() (int, error) - All() ([]*Tag, error) + Find(ctx context.Context, id int) (*Tag, error) + TagFinder + FindBySceneID(ctx context.Context, sceneID int) ([]*Tag, error) + FindByPerformerID(ctx context.Context, performerID int) ([]*Tag, error) + FindBySceneMarkerID(ctx context.Context, sceneMarkerID int) ([]*Tag, error) + FindByImageID(ctx context.Context, imageID int) ([]*Tag, error) + FindByGalleryID(ctx context.Context, galleryID int) ([]*Tag, error) + FindByName(ctx context.Context, name string, nocase bool) (*Tag, error) + FindByNames(ctx context.Context, names []string, nocase bool) ([]*Tag, error) + FindByParentTagID(ctx context.Context, parentID int) ([]*Tag, error) + FindByChildTagID(ctx context.Context, childID int) ([]*Tag, error) + Count(ctx context.Context) (int, error) + All(ctx context.Context) ([]*Tag, error) // TODO - this interface is temporary until the filter schema can fully // support the query needed - QueryForAutoTag(words []string) ([]*Tag, error) - Query(tagFilter *TagFilterType, findFilter *FindFilterType) ([]*Tag, int, error) - GetImage(tagID int) ([]byte, error) - GetAliases(tagID int) ([]string, error) - FindAllAncestors(tagID int, excludeIDs []int) ([]*TagPath, error) - FindAllDescendants(tagID int, excludeIDs []int) ([]*TagPath, error) + QueryForAutoTag(ctx context.Context, words []string) ([]*Tag, error) + Query(ctx context.Context, tagFilter *TagFilterType, findFilter *FindFilterType) ([]*Tag, int, error) + GetImage(ctx context.Context, tagID int) ([]byte, error) + GetAliases(ctx context.Context, tagID int) ([]string, error) + FindAllAncestors(ctx context.Context, tagID int, excludeIDs []int) ([]*TagPath, error) + FindAllDescendants(ctx context.Context, tagID int,
excludeIDs []int) ([]*TagPath, error) } type TagWriter interface { - Create(newTag Tag) (*Tag, error) - Update(updateTag TagPartial) (*Tag, error) - UpdateFull(updatedTag Tag) (*Tag, error) - Destroy(id int) error - UpdateImage(tagID int, image []byte) error - DestroyImage(tagID int) error - UpdateAliases(tagID int, aliases []string) error - Merge(source []int, destination int) error - UpdateParentTags(tagID int, parentIDs []int) error - UpdateChildTags(tagID int, parentIDs []int) error + Create(ctx context.Context, newTag Tag) (*Tag, error) + Update(ctx context.Context, updateTag TagPartial) (*Tag, error) + UpdateFull(ctx context.Context, updatedTag Tag) (*Tag, error) + Destroy(ctx context.Context, id int) error + UpdateImage(ctx context.Context, tagID int, image []byte) error + DestroyImage(ctx context.Context, tagID int) error + UpdateAliases(ctx context.Context, tagID int, aliases []string) error + Merge(ctx context.Context, source []int, destination int) error + UpdateParentTags(ctx context.Context, tagID int, parentIDs []int) error + UpdateChildTags(ctx context.Context, tagID int, parentIDs []int) error } type TagReaderWriter interface { diff --git a/pkg/models/transaction.go b/pkg/models/transaction.go deleted file mode 100644 index 291038b0c..000000000 --- a/pkg/models/transaction.go +++ /dev/null @@ -1,86 +0,0 @@ -package models - -import ( - "context" - - "github.com/stashapp/stash/pkg/logger" -) - -type Transaction interface { - Begin() error - Rollback() error - Commit() error - Repository() Repository -} - -type ReadTransaction interface { - Begin() error - Rollback() error - Commit() error - Repository() ReaderRepository -} - -type TransactionManager interface { - WithTxn(ctx context.Context, fn func(r Repository) error) error - WithReadTxn(ctx context.Context, fn func(r ReaderRepository) error) error -} - -func WithTxn(txn Transaction, fn func(r Repository) error) error { - err := txn.Begin() - if err != nil { - return err - } - - defer func() { - if p := recover(); p != nil { - // a panic occurred, rollback and repanic - if err := txn.Rollback(); err != nil { - logger.Warnf("error while trying to roll back transaction: %v", err) - } - panic(p) - } - - if err != nil { - // something went wrong, rollback - if err := txn.Rollback(); err != nil { - logger.Warnf("error while trying to roll back transaction: %v", err) - } - } else { - // all good, commit - err = txn.Commit() - } - }() - - err = fn(txn.Repository()) - return err -} - -func WithROTxn(txn ReadTransaction, fn func(r ReaderRepository) error) error { - err := txn.Begin() - if err != nil { - return err - } - - defer func() { - if p := recover(); p != nil { - // a panic occurred, rollback and repanic - if err := txn.Rollback(); err != nil { - logger.Warnf("error while trying to roll back RO transaction: %v", err) - } - panic(p) - } - - if err != nil { - // something went wrong, rollback - if err := txn.Rollback(); err != nil { - logger.Warnf("error while trying to roll back RO transaction: %v", err) - } - } else { - // all good, commit - err = txn.Commit() - } - }() - - err = fn(txn.Repository()) - return err -} diff --git a/pkg/models/update.go b/pkg/models/update.go new file mode 100644 index 000000000..ecc9314ec --- /dev/null +++ b/pkg/models/update.go @@ -0,0 +1,65 @@ +package models + +import ( + "fmt" + "io" + "strconv" + + "github.com/stashapp/stash/pkg/sliceutil/intslice" +) + +type RelationshipUpdateMode string + +const ( + RelationshipUpdateModeSet RelationshipUpdateMode = "SET" + RelationshipUpdateModeAdd 
RelationshipUpdateMode = "ADD" + RelationshipUpdateModeRemove RelationshipUpdateMode = "REMOVE" +) + +var AllRelationshipUpdateMode = []RelationshipUpdateMode{ + RelationshipUpdateModeSet, + RelationshipUpdateModeAdd, + RelationshipUpdateModeRemove, +} + +func (e RelationshipUpdateMode) IsValid() bool { + switch e { + case RelationshipUpdateModeSet, RelationshipUpdateModeAdd, RelationshipUpdateModeRemove: + return true + } + return false +} + +func (e RelationshipUpdateMode) String() string { + return string(e) +} + +func (e *RelationshipUpdateMode) UnmarshalGQL(v interface{}) error { + str, ok := v.(string) + if !ok { + return fmt.Errorf("enums must be strings") + } + + *e = RelationshipUpdateMode(str) + if !e.IsValid() { + return fmt.Errorf("%s is not a valid RelationshipUpdateMode", str) + } + return nil +} + +func (e RelationshipUpdateMode) MarshalGQL(w io.Writer) { + fmt.Fprint(w, strconv.Quote(e.String())) +} + +type UpdateIDs struct { + IDs []int `json:"ids"` + Mode RelationshipUpdateMode `json:"mode"` +} + +func (u *UpdateIDs) IDStrings() []string { + if u == nil { + return nil + } + + return intslice.IntSliceToStringSlice(u.IDs) +} diff --git a/pkg/models/value.go b/pkg/models/value.go new file mode 100644 index 000000000..0adff1f83 --- /dev/null +++ b/pkg/models/value.go @@ -0,0 +1,249 @@ +package models + +import ( + "strconv" + "time" +) + +// OptionalString represents an optional string argument that may be null. +// A value is only considered null if both Set and Null are true. +type OptionalString struct { + Value string + Null bool + Set bool +} + +// Ptr returns a pointer to the underlying value. Returns nil if Set is false or Null is true. +func (o *OptionalString) Ptr() *string { + if !o.Set || o.Null { + return nil + } + + v := o.Value + return &v +} + +// NewOptionalString returns a new OptionalString with the given value. +func NewOptionalString(v string) OptionalString { + return OptionalString{v, false, true} +} + +// NewOptionalStringPtr returns a new OptionalString with the given value. +// If the value is nil, the returned OptionalString will be set and null. +func NewOptionalStringPtr(v *string) OptionalString { + if v == nil { + return OptionalString{ + Null: true, + Set: true, + } + } + + return OptionalString{*v, false, true} +} + +// OptionalInt represents an optional int argument that may be null. See OptionalString. +type OptionalInt struct { + Value int + Null bool + Set bool +} + +// Ptr returns a pointer to the underlying value. Returns nil if Set is false or Null is true. +func (o *OptionalInt) Ptr() *int { + if !o.Set || o.Null { + return nil + } + + v := o.Value + return &v +} + +// NewOptionalInt returns a new OptionalInt with the given value. +func NewOptionalInt(v int) OptionalInt { + return OptionalInt{v, false, true} +} + +// NewOptionalIntPtr returns a new OptionalInt with the given value. +// If the value is nil, the returned OptionalInt will be set and null. +func NewOptionalIntPtr(v *int) OptionalInt { + if v == nil { + return OptionalInt{ + Null: true, + Set: true, + } + } + + return OptionalInt{*v, false, true} +} + +// StringPtr returns a pointer to a string representation of the value. +// Returns nil if Set is false or Null is true. +func (o *OptionalInt) StringPtr() *string { + if !o.Set || o.Null { + return nil + } + + v := strconv.Itoa(o.Value) + return &v +} + +// OptionalInt64 represents an optional int64 argument that may be null. See OptionalString.
+type OptionalInt64 struct { + Value int64 + Null bool + Set bool +} + +// Ptr returns a pointer to the underlying value. Returns nil if Set is false or Null is true. +func (o *OptionalInt64) Ptr() *int64 { + if !o.Set || o.Null { + return nil + } + + v := o.Value + return &v +} + +// NewOptionalInt64 returns a new OptionalInt64 with the given value. +func NewOptionalInt64(v int64) OptionalInt64 { + return OptionalInt64{v, false, true} +} + +// NewOptionalInt64Ptr returns a new OptionalInt64 with the given value. +// If the value is nil, the returned OptionalInt64 will be set and null. +func NewOptionalInt64Ptr(v *int64) OptionalInt64 { + if v == nil { + return OptionalInt64{ + Null: true, + Set: true, + } + } + + return OptionalInt64{*v, false, true} +} + +// OptionalBool represents an optional bool argument that may be null. See OptionalString. +type OptionalBool struct { + Value bool + Null bool + Set bool +} + +func (o *OptionalBool) Ptr() *bool { + if !o.Set || o.Null { + return nil + } + + v := o.Value + return &v +} + +// NewOptionalBool returns a new OptionalBool with the given value. +func NewOptionalBool(v bool) OptionalBool { + return OptionalBool{v, false, true} +} + +// NewOptionalBoolPtr returns a new OptionalBool with the given value. +// If the value is nil, the returned OptionalBool will be set and null. +func NewOptionalBoolPtr(v *bool) OptionalBool { + if v == nil { + return OptionalBool{ + Null: true, + Set: true, + } + } + + return OptionalBool{*v, false, true} +} + +// OptionalFloat64 represents an optional float64 argument that may be null. See OptionalString. +type OptionalFloat64 struct { + Value float64 + Null bool + Set bool +} + +// Ptr returns a pointer to the underlying value. Returns nil if Set is false or Null is true. +func (o *OptionalFloat64) Ptr() *float64 { + if !o.Set || o.Null { + return nil + } + + v := o.Value + return &v +} + +// NewOptionalFloat64 returns a new OptionalFloat64 with the given value. +func NewOptionalFloat64(v float64) OptionalFloat64 { + return OptionalFloat64{v, false, true} +} + +// OptionalDate represents an optional date argument that may be null. See OptionalString. +type OptionalDate struct { + Value Date + Null bool + Set bool +} + +// Ptr returns a pointer to the underlying value. Returns nil if Set is false or Null is true. +func (o *OptionalDate) Ptr() *Date { + if !o.Set || o.Null { + return nil + } + + v := o.Value + return &v +} + +// NewOptionalDate returns a new OptionalDate with the given value. +func NewOptionalDate(v Date) OptionalDate { + return OptionalDate{v, false, true} +} + +// NewOptionalDatePtr returns a new OptionalDate with the given value. +// If the value is nil, the returned OptionalDate will be set and null. +func NewOptionalDatePtr(v *Date) OptionalDate { + if v == nil { + return OptionalDate{ + Null: true, + Set: true, + } + } + + return OptionalDate{*v, false, true} +} + +// OptionalTime represents an optional time argument that may be null. See OptionalString. +type OptionalTime struct { + Value time.Time + Null bool + Set bool +} + +// NewOptionalTime returns a new OptionalTime with the given value. +func NewOptionalTime(v time.Time) OptionalTime { + return OptionalTime{v, false, true} +} + +// NewOptionalTimePtr returns a new OptionalTime with the given value. +// If the value is nil, the returned OptionalTime will be set and null.
+func NewOptionalTimePtr(v *time.Time) OptionalTime { + if v == nil { + return OptionalTime{ + Null: true, + Set: true, + } + } + + return OptionalTime{*v, false, true} +} + +// Ptr returns a pointer to the underlying value. Returns nil if Set is false or Null is true. +func (o *OptionalTime) Ptr() *time.Time { + if !o.Set || o.Null { + return nil + } + + v := o.Value + return &v +} diff --git a/pkg/movie/export.go b/pkg/movie/export.go index a70e30290..2af697a49 100644 --- a/pkg/movie/export.go +++ b/pkg/movie/export.go @@ -1,16 +1,23 @@ package movie import ( + "context" "fmt" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models/json" "github.com/stashapp/stash/pkg/models/jsonschema" + "github.com/stashapp/stash/pkg/studio" "github.com/stashapp/stash/pkg/utils" ) +type ImageGetter interface { + GetFrontImage(ctx context.Context, movieID int) ([]byte, error) + GetBackImage(ctx context.Context, movieID int) ([]byte, error) +} + // ToJSON converts a Movie into its JSON equivalent. -func ToJSON(reader models.MovieReader, studioReader models.StudioReader, movie *models.Movie) (*jsonschema.Movie, error) { +func ToJSON(ctx context.Context, reader ImageGetter, studioReader studio.Finder, movie *models.Movie) (*jsonschema.Movie, error) { newMovieJSON := jsonschema.Movie{ CreatedAt: json.JSONTime{Time: movie.CreatedAt.Timestamp}, UpdatedAt: json.JSONTime{Time: movie.UpdatedAt.Timestamp}, @@ -45,7 +52,7 @@ func ToJSON(reader models.MovieReader, studioReader models.StudioReader, movie * } if movie.StudioID.Valid { - studio, err := studioReader.Find(int(movie.StudioID.Int64)) + studio, err := studioReader.Find(ctx, int(movie.StudioID.Int64)) if err != nil { return nil, fmt.Errorf("error getting movie studio: %v", err) } @@ -55,7 +62,7 @@ func ToJSON(reader models.MovieReader, studioReader models.StudioReader, movie * } } - frontImage, err := reader.GetFrontImage(movie.ID) + frontImage, err := reader.GetFrontImage(ctx, movie.ID) if err != nil { return nil, fmt.Errorf("error getting movie front image: %v", err) } @@ -64,7 +71,7 @@ func ToJSON(reader models.MovieReader, studioReader models.StudioReader, movie * newMovieJSON.FrontImage = utils.GetBase64StringFromData(frontImage) } - backImage, err := reader.GetBackImage(movie.ID) + backImage, err := reader.GetBackImage(ctx, movie.ID) if err != nil { return nil, fmt.Errorf("error getting movie back image: %v", err) } diff --git a/pkg/movie/export_test.go b/pkg/movie/export_test.go index 11be97b7b..007383902 100644 --- a/pkg/movie/export_test.go +++ b/pkg/movie/export_test.go @@ -55,7 +55,7 @@ var ( backImageBytes = []byte("backImageBytes") ) -var studio models.Studio = models.Studio{ +var movieStudio models.Studio = models.Studio{ Name: models.NullString(studioName), } @@ -189,30 +189,30 @@ func TestToJSON(t *testing.T) { imageErr := errors.New("error getting image") - mockMovieReader.On("GetFrontImage", movieID).Return(frontImageBytes, nil).Once() - mockMovieReader.On("GetFrontImage", missingStudioMovieID).Return(frontImageBytes, nil).Once() - mockMovieReader.On("GetFrontImage", emptyID).Return(nil, nil).Once().Maybe() - mockMovieReader.On("GetFrontImage", errFrontImageID).Return(nil, imageErr).Once() - mockMovieReader.On("GetFrontImage", errBackImageID).Return(frontImageBytes, nil).Once() + mockMovieReader.On("GetFrontImage", testCtx, movieID).Return(frontImageBytes, nil).Once() + mockMovieReader.On("GetFrontImage", testCtx, missingStudioMovieID).Return(frontImageBytes, nil).Once() + mockMovieReader.On("GetFrontImage", testCtx, 
emptyID).Return(nil, nil).Once().Maybe() + mockMovieReader.On("GetFrontImage", testCtx, errFrontImageID).Return(nil, imageErr).Once() + mockMovieReader.On("GetFrontImage", testCtx, errBackImageID).Return(frontImageBytes, nil).Once() - mockMovieReader.On("GetBackImage", movieID).Return(backImageBytes, nil).Once() - mockMovieReader.On("GetBackImage", missingStudioMovieID).Return(backImageBytes, nil).Once() - mockMovieReader.On("GetBackImage", emptyID).Return(nil, nil).Once() - mockMovieReader.On("GetBackImage", errBackImageID).Return(nil, imageErr).Once() - mockMovieReader.On("GetBackImage", errFrontImageID).Return(backImageBytes, nil).Maybe() - mockMovieReader.On("GetBackImage", errStudioMovieID).Return(backImageBytes, nil).Maybe() + mockMovieReader.On("GetBackImage", testCtx, movieID).Return(backImageBytes, nil).Once() + mockMovieReader.On("GetBackImage", testCtx, missingStudioMovieID).Return(backImageBytes, nil).Once() + mockMovieReader.On("GetBackImage", testCtx, emptyID).Return(nil, nil).Once() + mockMovieReader.On("GetBackImage", testCtx, errBackImageID).Return(nil, imageErr).Once() + mockMovieReader.On("GetBackImage", testCtx, errFrontImageID).Return(backImageBytes, nil).Maybe() + mockMovieReader.On("GetBackImage", testCtx, errStudioMovieID).Return(backImageBytes, nil).Maybe() mockStudioReader := &mocks.StudioReaderWriter{} studioErr := errors.New("error getting studio") - mockStudioReader.On("Find", studioID).Return(&studio, nil) - mockStudioReader.On("Find", missingStudioID).Return(nil, nil) - mockStudioReader.On("Find", errStudioID).Return(nil, studioErr) + mockStudioReader.On("Find", testCtx, studioID).Return(&movieStudio, nil) + mockStudioReader.On("Find", testCtx, missingStudioID).Return(nil, nil) + mockStudioReader.On("Find", testCtx, errStudioID).Return(nil, studioErr) for i, s := range scenarios { movie := s.movie - json, err := ToJSON(mockMovieReader, mockStudioReader, &movie) + json, err := ToJSON(testCtx, mockMovieReader, mockStudioReader, &movie) switch { case !s.err && err != nil: diff --git a/pkg/movie/import.go b/pkg/movie/import.go index 6afdef8b9..461df0f84 100644 --- a/pkg/movie/import.go +++ b/pkg/movie/import.go @@ -1,18 +1,26 @@ package movie import ( + "context" "database/sql" "fmt" "github.com/stashapp/stash/pkg/hash/md5" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models/jsonschema" + "github.com/stashapp/stash/pkg/studio" "github.com/stashapp/stash/pkg/utils" ) +type NameFinderCreatorUpdater interface { + NameFinderCreator + UpdateFull(ctx context.Context, updatedMovie models.Movie) (*models.Movie, error) + UpdateImages(ctx context.Context, movieID int, frontImage []byte, backImage []byte) error +} + type Importer struct { - ReaderWriter models.MovieReaderWriter - StudioWriter models.StudioReaderWriter + ReaderWriter NameFinderCreatorUpdater + StudioWriter studio.NameFinderCreator Input jsonschema.Movie MissingRefBehaviour models.ImportMissingRefEnum @@ -21,10 +29,10 @@ type Importer struct { backImageData []byte } -func (i *Importer) PreImport() error { +func (i *Importer) PreImport(ctx context.Context) error { i.movie = i.movieJSONToMovie(i.Input) - if err := i.populateStudio(); err != nil { + if err := i.populateStudio(ctx); err != nil { return err } @@ -71,9 +79,9 @@ func (i *Importer) movieJSONToMovie(movieJSON jsonschema.Movie) models.Movie { return newMovie } -func (i *Importer) populateStudio() error { +func (i *Importer) populateStudio(ctx context.Context) error { if i.Input.Studio != "" { - studio, err := 
i.StudioWriter.FindByName(i.Input.Studio, false) + studio, err := i.StudioWriter.FindByName(ctx, i.Input.Studio, false) if err != nil { return fmt.Errorf("error finding studio by name: %v", err) } @@ -88,7 +96,7 @@ func (i *Importer) populateStudio() error { } if i.MissingRefBehaviour == models.ImportMissingRefEnumCreate { - studioID, err := i.createStudio(i.Input.Studio) + studioID, err := i.createStudio(ctx, i.Input.Studio) if err != nil { return err } @@ -105,10 +113,10 @@ func (i *Importer) populateStudio() error { return nil } -func (i *Importer) createStudio(name string) (int, error) { +func (i *Importer) createStudio(ctx context.Context, name string) (int, error) { newStudio := *models.NewStudio(name) - created, err := i.StudioWriter.Create(newStudio) + created, err := i.StudioWriter.Create(ctx, newStudio) if err != nil { return 0, err } @@ -116,9 +124,9 @@ func (i *Importer) createStudio(name string) (int, error) { return created.ID, nil } -func (i *Importer) PostImport(id int) error { +func (i *Importer) PostImport(ctx context.Context, id int) error { if len(i.frontImageData) > 0 { - if err := i.ReaderWriter.UpdateImages(id, i.frontImageData, i.backImageData); err != nil { + if err := i.ReaderWriter.UpdateImages(ctx, id, i.frontImageData, i.backImageData); err != nil { return fmt.Errorf("error setting movie images: %v", err) } } @@ -130,9 +138,9 @@ func (i *Importer) Name() string { return i.Input.Name } -func (i *Importer) FindExistingID() (*int, error) { +func (i *Importer) FindExistingID(ctx context.Context) (*int, error) { const nocase = false - existing, err := i.ReaderWriter.FindByName(i.Name(), nocase) + existing, err := i.ReaderWriter.FindByName(ctx, i.Name(), nocase) if err != nil { return nil, err } @@ -145,8 +153,8 @@ func (i *Importer) FindExistingID() (*int, error) { return nil, nil } -func (i *Importer) Create() (*int, error) { - created, err := i.ReaderWriter.Create(i.movie) +func (i *Importer) Create(ctx context.Context) (*int, error) { + created, err := i.ReaderWriter.Create(ctx, i.movie) if err != nil { return nil, fmt.Errorf("error creating movie: %v", err) } @@ -155,10 +163,10 @@ func (i *Importer) Create() (*int, error) { return &id, nil } -func (i *Importer) Update(id int) error { +func (i *Importer) Update(ctx context.Context, id int) error { movie := i.movie movie.ID = id - _, err := i.ReaderWriter.UpdateFull(movie) + _, err := i.ReaderWriter.UpdateFull(ctx, movie) if err != nil { return fmt.Errorf("error updating existing movie: %v", err) } diff --git a/pkg/movie/import_test.go b/pkg/movie/import_test.go index 7aff71d47..26b6d9f27 100644 --- a/pkg/movie/import_test.go +++ b/pkg/movie/import_test.go @@ -1,6 +1,7 @@ package movie import ( + "context" "errors" "testing" @@ -27,6 +28,8 @@ const ( errImageID = 3 ) +var testCtx = context.Background() + func TestImporterName(t *testing.T) { i := Importer{ Input: jsonschema.Movie{ @@ -45,23 +48,23 @@ func TestImporterPreImport(t *testing.T) { }, } - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) i.Input.FrontImage = frontImage i.Input.BackImage = invalidImage - err = i.PreImport() + err = i.PreImport(testCtx) assert.NotNil(t, err) i.Input.BackImage = "" - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) i.Input.BackImage = backImage - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) } @@ -79,17 +82,17 @@ func TestImporterPreImportWithStudio(t *testing.T) { }, } - studioReaderWriter.On("FindByName", existingStudioName, false).Return(&models.Studio{ + 
studioReaderWriter.On("FindByName", testCtx, existingStudioName, false).Return(&models.Studio{ ID: existingStudioID, }, nil).Once() - studioReaderWriter.On("FindByName", existingStudioErr, false).Return(nil, errors.New("FindByName error")).Once() + studioReaderWriter.On("FindByName", testCtx, existingStudioErr, false).Return(nil, errors.New("FindByName error")).Once() - err := i.PreImport() + err := i.PreImport(testCtx) assert.Nil(t, err) assert.Equal(t, int64(existingStudioID), i.movie.StudioID.Int64) i.Input.Studio = existingStudioErr - err = i.PreImport() + err = i.PreImport(testCtx) assert.NotNil(t, err) studioReaderWriter.AssertExpectations(t) @@ -108,20 +111,20 @@ func TestImporterPreImportWithMissingStudio(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumFail, } - studioReaderWriter.On("FindByName", missingStudioName, false).Return(nil, nil).Times(3) - studioReaderWriter.On("Create", mock.AnythingOfType("models.Studio")).Return(&models.Studio{ + studioReaderWriter.On("FindByName", testCtx, missingStudioName, false).Return(nil, nil).Times(3) + studioReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Studio")).Return(&models.Studio{ ID: existingStudioID, }, nil) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumIgnore - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumCreate - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) assert.Equal(t, int64(existingStudioID), i.movie.StudioID.Int64) @@ -141,10 +144,10 @@ func TestImporterPreImportWithMissingStudioCreateErr(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumCreate, } - studioReaderWriter.On("FindByName", missingStudioName, false).Return(nil, nil).Once() - studioReaderWriter.On("Create", mock.AnythingOfType("models.Studio")).Return(nil, errors.New("Create error")) + studioReaderWriter.On("FindByName", testCtx, missingStudioName, false).Return(nil, nil).Once() + studioReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Studio")).Return(nil, errors.New("Create error")) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) } @@ -159,13 +162,13 @@ func TestImporterPostImport(t *testing.T) { updateMovieImageErr := errors.New("UpdateImages error") - readerWriter.On("UpdateImages", movieID, frontImageBytes, backImageBytes).Return(nil).Once() - readerWriter.On("UpdateImages", errImageID, frontImageBytes, backImageBytes).Return(updateMovieImageErr).Once() + readerWriter.On("UpdateImages", testCtx, movieID, frontImageBytes, backImageBytes).Return(nil).Once() + readerWriter.On("UpdateImages", testCtx, errImageID, frontImageBytes, backImageBytes).Return(updateMovieImageErr).Once() - err := i.PostImport(movieID) + err := i.PostImport(testCtx, movieID) assert.Nil(t, err) - err = i.PostImport(errImageID) + err = i.PostImport(testCtx, errImageID) assert.NotNil(t, err) readerWriter.AssertExpectations(t) @@ -182,23 +185,23 @@ func TestImporterFindExistingID(t *testing.T) { } errFindByName := errors.New("FindByName error") - readerWriter.On("FindByName", movieName, false).Return(nil, nil).Once() - readerWriter.On("FindByName", existingMovieName, false).Return(&models.Movie{ + readerWriter.On("FindByName", testCtx, movieName, false).Return(nil, nil).Once() + readerWriter.On("FindByName", testCtx, existingMovieName, false).Return(&models.Movie{ ID: existingMovieID, }, nil).Once() - readerWriter.On("FindByName", 
movieNameErr, false).Return(nil, errFindByName).Once() + readerWriter.On("FindByName", testCtx, movieNameErr, false).Return(nil, errFindByName).Once() - id, err := i.FindExistingID() + id, err := i.FindExistingID(testCtx) assert.Nil(t, id) assert.Nil(t, err) i.Input.Name = existingMovieName - id, err = i.FindExistingID() + id, err = i.FindExistingID(testCtx) assert.Equal(t, existingMovieID, *id) assert.Nil(t, err) i.Input.Name = movieNameErr - id, err = i.FindExistingID() + id, err = i.FindExistingID(testCtx) assert.Nil(t, id) assert.NotNil(t, err) @@ -222,17 +225,17 @@ func TestCreate(t *testing.T) { } errCreate := errors.New("Create error") - readerWriter.On("Create", movie).Return(&models.Movie{ + readerWriter.On("Create", testCtx, movie).Return(&models.Movie{ ID: movieID, }, nil).Once() - readerWriter.On("Create", movieErr).Return(nil, errCreate).Once() + readerWriter.On("Create", testCtx, movieErr).Return(nil, errCreate).Once() - id, err := i.Create() + id, err := i.Create(testCtx) assert.Equal(t, movieID, *id) assert.Nil(t, err) i.movie = movieErr - id, err = i.Create() + id, err = i.Create(testCtx) assert.Nil(t, id) assert.NotNil(t, err) @@ -259,18 +262,18 @@ func TestUpdate(t *testing.T) { // id needs to be set for the mock input movie.ID = movieID - readerWriter.On("UpdateFull", movie).Return(nil, nil).Once() + readerWriter.On("UpdateFull", testCtx, movie).Return(nil, nil).Once() - err := i.Update(movieID) + err := i.Update(testCtx, movieID) assert.Nil(t, err) i.movie = movieErr // need to set id separately movieErr.ID = errImageID - readerWriter.On("UpdateFull", movieErr).Return(nil, errUpdate).Once() + readerWriter.On("UpdateFull", testCtx, movieErr).Return(nil, errUpdate).Once() - err = i.Update(errImageID) + err = i.Update(testCtx, errImageID) assert.NotNil(t, err) readerWriter.AssertExpectations(t) diff --git a/pkg/movie/update.go b/pkg/movie/update.go new file mode 100644 index 000000000..48dc9c123 --- /dev/null +++ b/pkg/movie/update.go @@ -0,0 +1,12 @@ +package movie + +import ( + "context" + + "github.com/stashapp/stash/pkg/models" +) + +type NameFinderCreator interface { + FindByName(ctx context.Context, name string, nocase bool) (*models.Movie, error) + Create(ctx context.Context, newMovie models.Movie) (*models.Movie, error) +} diff --git a/pkg/performer/export.go b/pkg/performer/export.go index 240d0fc28..9a1a9c701 100644 --- a/pkg/performer/export.go +++ b/pkg/performer/export.go @@ -1,6 +1,7 @@ package performer import ( + "context" "fmt" "github.com/stashapp/stash/pkg/models" @@ -9,8 +10,13 @@ import ( "github.com/stashapp/stash/pkg/utils" ) +type ImageStashIDGetter interface { + GetImage(ctx context.Context, performerID int) ([]byte, error) + models.StashIDLoader +} + // ToJSON converts a Performer object into its JSON equivalent. 
-func ToJSON(reader models.PerformerReader, performer *models.Performer) (*jsonschema.Performer, error) { +func ToJSON(ctx context.Context, reader ImageStashIDGetter, performer *models.Performer) (*jsonschema.Performer, error) { newPerformerJSON := jsonschema.Performer{ IgnoreAutoTag: performer.IgnoreAutoTag, CreatedAt: json.JSONTime{Time: performer.CreatedAt.Timestamp}, @@ -84,7 +90,7 @@ func ToJSON(reader models.PerformerReader, performer *models.Performer) (*jsonsc newPerformerJSON.Weight = int(performer.Weight.Int64) } - image, err := reader.GetImage(performer.ID) + image, err := reader.GetImage(ctx, performer.ID) if err != nil { return nil, fmt.Errorf("error getting performers image: %v", err) } @@ -93,7 +99,7 @@ func ToJSON(reader models.PerformerReader, performer *models.Performer) (*jsonsc newPerformerJSON.Image = utils.GetBase64StringFromData(image) } - stashIDs, _ := reader.GetStashIDs(performer.ID) + stashIDs, _ := reader.GetStashIDs(ctx, performer.ID) var ret []models.StashID for _, stashID := range stashIDs { newJoin := models.StashID{ diff --git a/pkg/performer/export_test.go b/pkg/performer/export_test.go index 7cfdabb7f..f328c0d8c 100644 --- a/pkg/performer/export_test.go +++ b/pkg/performer/export_test.go @@ -50,8 +50,8 @@ var stashID = models.StashID{ StashID: "StashID", Endpoint: "Endpoint", } -var stashIDs = []*models.StashID{ - &stashID, +var stashIDs = []models.StashID{ + stashID, } const image = "aW1hZ2VCeXRlcw==" @@ -208,16 +208,16 @@ func TestToJSON(t *testing.T) { imageErr := errors.New("error getting image") - mockPerformerReader.On("GetImage", performerID).Return(imageBytes, nil).Once() - mockPerformerReader.On("GetImage", noImageID).Return(nil, nil).Once() - mockPerformerReader.On("GetImage", errImageID).Return(nil, imageErr).Once() + mockPerformerReader.On("GetImage", testCtx, performerID).Return(imageBytes, nil).Once() + mockPerformerReader.On("GetImage", testCtx, noImageID).Return(nil, nil).Once() + mockPerformerReader.On("GetImage", testCtx, errImageID).Return(nil, imageErr).Once() - mockPerformerReader.On("GetStashIDs", performerID).Return(stashIDs, nil).Once() - mockPerformerReader.On("GetStashIDs", noImageID).Return(nil, nil).Once() + mockPerformerReader.On("GetStashIDs", testCtx, performerID).Return(stashIDs, nil).Once() + mockPerformerReader.On("GetStashIDs", testCtx, noImageID).Return(nil, nil).Once() for i, s := range scenarios { tag := s.input - json, err := ToJSON(mockPerformerReader, &tag) + json, err := ToJSON(testCtx, mockPerformerReader, &tag) switch { case !s.err && err != nil: diff --git a/pkg/performer/import.go b/pkg/performer/import.go index 9e4ec77f7..7c673fb34 100644 --- a/pkg/performer/import.go +++ b/pkg/performer/import.go @@ -1,6 +1,7 @@ package performer import ( + "context" "database/sql" "fmt" "strings" @@ -9,12 +10,21 @@ import ( "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models/jsonschema" "github.com/stashapp/stash/pkg/sliceutil/stringslice" + "github.com/stashapp/stash/pkg/tag" "github.com/stashapp/stash/pkg/utils" ) +type NameFinderCreatorUpdater interface { + NameFinderCreator + UpdateFull(ctx context.Context, updatedPerformer models.Performer) (*models.Performer, error) + UpdateTags(ctx context.Context, performerID int, tagIDs []int) error + UpdateImage(ctx context.Context, performerID int, image []byte) error + UpdateStashIDs(ctx context.Context, performerID int, stashIDs []models.StashID) error +} + type Importer struct { - ReaderWriter models.PerformerReaderWriter - TagWriter 
models.TagReaderWriter + ReaderWriter NameFinderCreatorUpdater + TagWriter tag.NameFinderCreator Input jsonschema.Performer MissingRefBehaviour models.ImportMissingRefEnum @@ -25,10 +35,10 @@ type Importer struct { tags []*models.Tag } -func (i *Importer) PreImport() error { +func (i *Importer) PreImport(ctx context.Context) error { i.performer = performerJSONToPerformer(i.Input) - if err := i.populateTags(); err != nil { + if err := i.populateTags(ctx); err != nil { return err } @@ -43,10 +53,10 @@ func (i *Importer) PreImport() error { return nil } -func (i *Importer) populateTags() error { +func (i *Importer) populateTags(ctx context.Context) error { if len(i.Input.Tags) > 0 { - tags, err := importTags(i.TagWriter, i.Input.Tags, i.MissingRefBehaviour) + tags, err := importTags(ctx, i.TagWriter, i.Input.Tags, i.MissingRefBehaviour) if err != nil { return err } @@ -57,8 +67,8 @@ func (i *Importer) populateTags() error { return nil } -func importTags(tagWriter models.TagReaderWriter, names []string, missingRefBehaviour models.ImportMissingRefEnum) ([]*models.Tag, error) { - tags, err := tagWriter.FindByNames(names, false) +func importTags(ctx context.Context, tagWriter tag.NameFinderCreator, names []string, missingRefBehaviour models.ImportMissingRefEnum) ([]*models.Tag, error) { + tags, err := tagWriter.FindByNames(ctx, names, false) if err != nil { return nil, err } @@ -78,7 +88,7 @@ func importTags(tagWriter models.TagReaderWriter, names []string, missingRefBeha } if missingRefBehaviour == models.ImportMissingRefEnumCreate { - createdTags, err := createTags(tagWriter, missingTags) + createdTags, err := createTags(ctx, tagWriter, missingTags) if err != nil { return nil, fmt.Errorf("error creating tags: %v", err) } @@ -92,12 +102,12 @@ func importTags(tagWriter models.TagReaderWriter, names []string, missingRefBeha return tags, nil } -func createTags(tagWriter models.TagWriter, names []string) ([]*models.Tag, error) { +func createTags(ctx context.Context, tagWriter tag.NameFinderCreator, names []string) ([]*models.Tag, error) { var ret []*models.Tag for _, name := range names { newTag := *models.NewTag(name) - created, err := tagWriter.Create(newTag) + created, err := tagWriter.Create(ctx, newTag) if err != nil { return nil, err } @@ -108,25 +118,25 @@ func createTags(tagWriter models.TagWriter, names []string) ([]*models.Tag, erro return ret, nil } -func (i *Importer) PostImport(id int) error { +func (i *Importer) PostImport(ctx context.Context, id int) error { if len(i.tags) > 0 { var tagIDs []int for _, t := range i.tags { tagIDs = append(tagIDs, t.ID) } - if err := i.ReaderWriter.UpdateTags(id, tagIDs); err != nil { + if err := i.ReaderWriter.UpdateTags(ctx, id, tagIDs); err != nil { return fmt.Errorf("failed to associate tags: %v", err) } } if len(i.imageData) > 0 { - if err := i.ReaderWriter.UpdateImage(id, i.imageData); err != nil { + if err := i.ReaderWriter.UpdateImage(ctx, id, i.imageData); err != nil { return fmt.Errorf("error setting performer image: %v", err) } } if len(i.Input.StashIDs) > 0 { - if err := i.ReaderWriter.UpdateStashIDs(id, i.Input.StashIDs); err != nil { + if err := i.ReaderWriter.UpdateStashIDs(ctx, id, i.Input.StashIDs); err != nil { return fmt.Errorf("error setting stash id: %v", err) } } @@ -138,9 +148,9 @@ func (i *Importer) Name() string { return i.Input.Name } -func (i *Importer) FindExistingID() (*int, error) { +func (i *Importer) FindExistingID(ctx context.Context) (*int, error) { const nocase = false - existing, err := 
i.ReaderWriter.FindByNames([]string{i.Name()}, nocase) + existing, err := i.ReaderWriter.FindByNames(ctx, []string{i.Name()}, nocase) if err != nil { return nil, err } @@ -153,8 +163,8 @@ func (i *Importer) FindExistingID() (*int, error) { return nil, nil } -func (i *Importer) Create() (*int, error) { - created, err := i.ReaderWriter.Create(i.performer) +func (i *Importer) Create(ctx context.Context) (*int, error) { + created, err := i.ReaderWriter.Create(ctx, i.performer) if err != nil { return nil, fmt.Errorf("error creating performer: %v", err) } @@ -163,10 +173,10 @@ func (i *Importer) Create() (*int, error) { return &id, nil } -func (i *Importer) Update(id int) error { +func (i *Importer) Update(ctx context.Context, id int) error { performer := i.performer performer.ID = id - _, err := i.ReaderWriter.UpdateFull(performer) + _, err := i.ReaderWriter.UpdateFull(ctx, performer) if err != nil { return fmt.Errorf("error updating existing performer: %v", err) } diff --git a/pkg/performer/import_test.go b/pkg/performer/import_test.go index 30ddbae5e..4f80a67c0 100644 --- a/pkg/performer/import_test.go +++ b/pkg/performer/import_test.go @@ -1,6 +1,7 @@ package performer import ( + "context" "errors" "github.com/stretchr/testify/mock" @@ -29,6 +30,8 @@ const ( missingTagName = "missingTagName" ) +var testCtx = context.Background() + func TestImporterName(t *testing.T) { i := Importer{ Input: jsonschema.Performer{ @@ -47,13 +50,13 @@ func TestImporterPreImport(t *testing.T) { }, } - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) i.Input = *createFullJSONPerformer(performerName, image) - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) expectedPerformer := *createFullPerformer(0, performerName) @@ -74,20 +77,20 @@ func TestImporterPreImportWithTag(t *testing.T) { }, } - tagReaderWriter.On("FindByNames", []string{existingTagName}, false).Return([]*models.Tag{ + tagReaderWriter.On("FindByNames", testCtx, []string{existingTagName}, false).Return([]*models.Tag{ { ID: existingTagID, Name: existingTagName, }, }, nil).Once() - tagReaderWriter.On("FindByNames", []string{existingTagErr}, false).Return(nil, errors.New("FindByNames error")).Once() + tagReaderWriter.On("FindByNames", testCtx, []string{existingTagErr}, false).Return(nil, errors.New("FindByNames error")).Once() - err := i.PreImport() + err := i.PreImport(testCtx) assert.Nil(t, err) assert.Equal(t, existingTagID, i.tags[0].ID) i.Input.Tags = []string{existingTagErr} - err = i.PreImport() + err = i.PreImport(testCtx) assert.NotNil(t, err) tagReaderWriter.AssertExpectations(t) @@ -106,20 +109,20 @@ func TestImporterPreImportWithMissingTag(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumFail, } - tagReaderWriter.On("FindByNames", []string{missingTagName}, false).Return(nil, nil).Times(3) - tagReaderWriter.On("Create", mock.AnythingOfType("models.Tag")).Return(&models.Tag{ + tagReaderWriter.On("FindByNames", testCtx, []string{missingTagName}, false).Return(nil, nil).Times(3) + tagReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Tag")).Return(&models.Tag{ ID: existingTagID, }, nil) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumIgnore - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumCreate - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) assert.Equal(t, existingTagID, i.tags[0].ID) @@ -139,10 +142,10 @@ 
func TestImporterPreImportWithMissingTagCreateErr(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumCreate, } - tagReaderWriter.On("FindByNames", []string{missingTagName}, false).Return(nil, nil).Once() - tagReaderWriter.On("Create", mock.AnythingOfType("models.Tag")).Return(nil, errors.New("Create error")) + tagReaderWriter.On("FindByNames", testCtx, []string{missingTagName}, false).Return(nil, nil).Once() + tagReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Tag")).Return(nil, errors.New("Create error")) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) } @@ -156,13 +159,13 @@ func TestImporterPostImport(t *testing.T) { updatePerformerImageErr := errors.New("UpdateImage error") - readerWriter.On("UpdateImage", performerID, imageBytes).Return(nil).Once() - readerWriter.On("UpdateImage", errImageID, imageBytes).Return(updatePerformerImageErr).Once() + readerWriter.On("UpdateImage", testCtx, performerID, imageBytes).Return(nil).Once() + readerWriter.On("UpdateImage", testCtx, errImageID, imageBytes).Return(updatePerformerImageErr).Once() - err := i.PostImport(performerID) + err := i.PostImport(testCtx, performerID) assert.Nil(t, err) - err = i.PostImport(errImageID) + err = i.PostImport(testCtx, errImageID) assert.NotNil(t, err) readerWriter.AssertExpectations(t) @@ -179,25 +182,25 @@ func TestImporterFindExistingID(t *testing.T) { } errFindByNames := errors.New("FindByNames error") - readerWriter.On("FindByNames", []string{performerName}, false).Return(nil, nil).Once() - readerWriter.On("FindByNames", []string{existingPerformerName}, false).Return([]*models.Performer{ + readerWriter.On("FindByNames", testCtx, []string{performerName}, false).Return(nil, nil).Once() + readerWriter.On("FindByNames", testCtx, []string{existingPerformerName}, false).Return([]*models.Performer{ { ID: existingPerformerID, }, }, nil).Once() - readerWriter.On("FindByNames", []string{performerNameErr}, false).Return(nil, errFindByNames).Once() + readerWriter.On("FindByNames", testCtx, []string{performerNameErr}, false).Return(nil, errFindByNames).Once() - id, err := i.FindExistingID() + id, err := i.FindExistingID(testCtx) assert.Nil(t, id) assert.Nil(t, err) i.Input.Name = existingPerformerName - id, err = i.FindExistingID() + id, err = i.FindExistingID(testCtx) assert.Equal(t, existingPerformerID, *id) assert.Nil(t, err) i.Input.Name = performerNameErr - id, err = i.FindExistingID() + id, err = i.FindExistingID(testCtx) assert.Nil(t, id) assert.NotNil(t, err) @@ -218,13 +221,13 @@ func TestImporterPostImportUpdateTags(t *testing.T) { updateErr := errors.New("UpdateTags error") - readerWriter.On("UpdateTags", performerID, []int{existingTagID}).Return(nil).Once() - readerWriter.On("UpdateTags", errTagsID, mock.AnythingOfType("[]int")).Return(updateErr).Once() + readerWriter.On("UpdateTags", testCtx, performerID, []int{existingTagID}).Return(nil).Once() + readerWriter.On("UpdateTags", testCtx, errTagsID, mock.AnythingOfType("[]int")).Return(updateErr).Once() - err := i.PostImport(performerID) + err := i.PostImport(testCtx, performerID) assert.Nil(t, err) - err = i.PostImport(errTagsID) + err = i.PostImport(testCtx, errTagsID) assert.NotNil(t, err) readerWriter.AssertExpectations(t) @@ -247,17 +250,17 @@ func TestCreate(t *testing.T) { } errCreate := errors.New("Create error") - readerWriter.On("Create", performer).Return(&models.Performer{ + readerWriter.On("Create", testCtx, performer).Return(&models.Performer{ ID: performerID, }, nil).Once() - 
readerWriter.On("Create", performerErr).Return(nil, errCreate).Once() + readerWriter.On("Create", testCtx, performerErr).Return(nil, errCreate).Once() - id, err := i.Create() + id, err := i.Create(testCtx) assert.Equal(t, performerID, *id) assert.Nil(t, err) i.performer = performerErr - id, err = i.Create() + id, err = i.Create(testCtx) assert.Nil(t, id) assert.NotNil(t, err) @@ -284,18 +287,18 @@ func TestUpdate(t *testing.T) { // id needs to be set for the mock input performer.ID = performerID - readerWriter.On("UpdateFull", performer).Return(nil, nil).Once() + readerWriter.On("UpdateFull", testCtx, performer).Return(nil, nil).Once() - err := i.Update(performerID) + err := i.Update(testCtx, performerID) assert.Nil(t, err) i.performer = performerErr // need to set id separately performerErr.ID = errImageID - readerWriter.On("UpdateFull", performerErr).Return(nil, errUpdate).Once() + readerWriter.On("UpdateFull", testCtx, performerErr).Return(nil, errUpdate).Once() - err = i.Update(errImageID) + err = i.Update(testCtx, errImageID) assert.NotNil(t, err) readerWriter.AssertExpectations(t) diff --git a/pkg/performer/update.go b/pkg/performer/update.go new file mode 100644 index 000000000..5974a5eab --- /dev/null +++ b/pkg/performer/update.go @@ -0,0 +1,12 @@ +package performer + +import ( + "context" + + "github.com/stashapp/stash/pkg/models" +) + +type NameFinderCreator interface { + FindByNames(ctx context.Context, names []string, nocase bool) ([]*models.Performer, error) + Create(ctx context.Context, newPerformer models.Performer) (*models.Performer, error) +} diff --git a/pkg/plugin/args.go b/pkg/plugin/args.go index adcdf007f..d85a624c7 100644 --- a/pkg/plugin/args.go +++ b/pkg/plugin/args.go @@ -1,10 +1,20 @@ package plugin -import ( - "github.com/stashapp/stash/pkg/models" -) +type PluginArgInput struct { + Key string `json:"key"` + Value *PluginValueInput `json:"value"` +} -func findArg(args []*models.PluginArgInput, name string) *models.PluginArgInput { +type PluginValueInput struct { + Str *string `json:"str"` + I *int `json:"i"` + B *bool `json:"b"` + F *float64 `json:"f"` + O []*PluginArgInput `json:"o"` + A []*PluginValueInput `json:"a"` +} + +func findArg(args []*PluginArgInput, name string) *PluginArgInput { for _, v := range args { if v.Key == name { return v @@ -14,13 +24,13 @@ func findArg(args []*models.PluginArgInput, name string) *models.PluginArgInput return nil } -func applyDefaultArgs(args []*models.PluginArgInput, defaultArgs map[string]string) []*models.PluginArgInput { +func applyDefaultArgs(args []*PluginArgInput, defaultArgs map[string]string) []*PluginArgInput { for k, v := range defaultArgs { if arg := findArg(args, k); arg == nil { v := v // Copy v, because it's being exported out of the loop - args = append(args, &models.PluginArgInput{ + args = append(args, &PluginArgInput{ Key: k, - Value: &models.PluginValueInput{ + Value: &PluginValueInput{ Str: &v, }, }) diff --git a/pkg/plugin/config.go b/pkg/plugin/config.go index 05501b4e2..2a00c3ced 100644 --- a/pkg/plugin/config.go +++ b/pkg/plugin/config.go @@ -8,7 +8,6 @@ import ( "path/filepath" "strings" - "github.com/stashapp/stash/pkg/models" "gopkg.in/yaml.v2" ) @@ -59,11 +58,11 @@ type Config struct { Hooks []*HookConfig `yaml:"hooks"` } -func (c Config) getPluginTasks(includePlugin bool) []*models.PluginTask { - var ret []*models.PluginTask +func (c Config) getPluginTasks(includePlugin bool) []*PluginTask { + var ret []*PluginTask for _, o := range c.Tasks { - task := &models.PluginTask{ + task := 
&PluginTask{ + Name: o.Name, + Description: &o.Description, } @@ -77,11 +76,11 @@ func (c Config) getPluginTasks(includePlugin bool) []*models.PluginTask { return ret } -func (c Config) getPluginHooks(includePlugin bool) []*models.PluginHook { - var ret []*models.PluginHook +func (c Config) getPluginHooks(includePlugin bool) []*PluginHook { + var ret []*PluginHook for _, o := range c.Hooks { - hook := &models.PluginHook{ + hook := &PluginHook{ Name: o.Name, Description: &o.Description, Hooks: convertHooks(o.TriggeredBy), @@ -113,8 +112,8 @@ func (c Config) getName() string { return c.id } -func (c Config) toPlugin() *models.Plugin { - return &models.Plugin{ +func (c Config) toPlugin() *Plugin { + return &Plugin{ ID: c.id, Name: c.getName(), Description: c.Description, diff --git a/pkg/plugin/convert.go b/pkg/plugin/convert.go index 989008d60..7aeb95983 100644 --- a/pkg/plugin/convert.go +++ b/pkg/plugin/convert.go @@ -1,11 +1,10 @@ package plugin import ( - "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/plugin/common" ) -func toPluginArgs(args []*models.PluginArgInput) common.ArgsMap { +func toPluginArgs(args []*PluginArgInput) common.ArgsMap { ret := make(common.ArgsMap) for _, a := range args { ret[a.Key] = toPluginArgValue(a.Value) @@ -14,7 +13,7 @@ func toPluginArgs(args []*models.PluginArgInput) common.ArgsMap { return ret } -func toPluginArgValue(arg *models.PluginValueInput) common.PluginArgValue { +func toPluginArgValue(arg *PluginValueInput) common.PluginArgValue { if arg == nil { return nil } diff --git a/pkg/plugin/hooks.go b/pkg/plugin/hooks.go index f95eb1620..a60e44e6c 100644 --- a/pkg/plugin/hooks.go +++ b/pkg/plugin/hooks.go @@ -5,6 +5,13 @@ import ( "github.com/stashapp/stash/pkg/plugin/common" ) +type PluginHook struct { + Name string `json:"name"` + Description *string `json:"description"` + Hooks []string `json:"hooks"` + Plugin *Plugin `json:"plugin"` +} + type HookTriggerEnum string // Scan-related hooks are currently disabled until post-hook execution is diff --git a/pkg/plugin/plugins.go b/pkg/plugin/plugins.go index 85fde229b..ea66adcc2 100644 --- a/pkg/plugin/plugins.go +++ b/pkg/plugin/plugins.go @@ -20,8 +20,19 @@ import ( "github.com/stashapp/stash/pkg/plugin/common" "github.com/stashapp/stash/pkg/session" "github.com/stashapp/stash/pkg/sliceutil/stringslice" + "github.com/stashapp/stash/pkg/txn" ) +type Plugin struct { + ID string `json:"id"` + Name string `json:"name"` + Description *string `json:"description"` + URL *string `json:"url"` + Version *string `json:"version"` + Tasks []*PluginTask `json:"tasks"` + Hooks []*PluginHook `json:"hooks"` +} + type ServerConfig interface { GetHost() string GetPort() int @@ -103,8 +114,8 @@ func loadPlugins(path string) ([]Config, error) { } // ListPlugins returns plugin details for all of the loaded plugins. -func (c Cache) ListPlugins() []*models.Plugin { - var ret []*models.Plugin +func (c Cache) ListPlugins() []*Plugin { + var ret []*Plugin for _, s := range c.plugins { ret = append(ret, s.toPlugin()) } @@ -113,8 +124,8 @@ func (c Cache) ListPlugins() []*models.Plugin { return ret } // ListPluginTasks returns all runnable plugin tasks in all loaded plugins. -func (c Cache) ListPluginTasks() []*models.PluginTask { - var ret []*models.PluginTask +func (c Cache) ListPluginTasks() []*PluginTask { + var ret []*PluginTask for _, s := range c.plugins { ret = append(ret, s.getPluginTasks(true)...)
} @@ -122,7 +133,7 @@ func (c Cache) ListPluginTasks() []*models.PluginTask { return ret } -func buildPluginInput(plugin *Config, operation *OperationConfig, serverConnection common.StashServerConnection, args []*models.PluginArgInput) common.PluginInput { +func buildPluginInput(plugin *Config, operation *OperationConfig, serverConnection common.StashServerConnection, args []*PluginArgInput) common.PluginInput { args = applyDefaultArgs(args, operation.DefaultArgs) serverConnection.PluginDir = plugin.getConfigPath() return common.PluginInput{ @@ -152,7 +163,7 @@ func (c Cache) makeServerConnection(ctx context.Context) common.StashServerConne // CreateTask runs the plugin operation for the pluginID and operation // name provided. Returns an error if the plugin or the operation could not be // resolved. -func (c Cache) CreateTask(ctx context.Context, pluginID string, operationName string, args []*models.PluginArgInput, progress chan float64) (Task, error) { +func (c Cache) CreateTask(ctx context.Context, pluginID string, operationName string, args []*PluginArgInput, progress chan float64) (Task, error) { serverConnection := c.makeServerConnection(ctx) // find the plugin and operation @@ -189,6 +200,13 @@ func (c Cache) ExecutePostHooks(ctx context.Context, id int, hookType HookTrigge } } +func (c Cache) RegisterPostHooks(ctx context.Context, txnMgr txn.Manager, id int, hookType HookTriggerEnum, input interface{}, inputFields []string) { + txnMgr.AddPostCommitHook(ctx, func(ctx context.Context) error { + c.ExecutePostHooks(ctx, id, hookType, input, inputFields) + return nil + }) +} + func (c Cache) ExecuteSceneUpdatePostHooks(ctx context.Context, input models.SceneUpdateInput, inputFields []string) { id, err := strconv.Atoi(input.ID) if err != nil { diff --git a/pkg/plugin/task.go b/pkg/plugin/task.go index e80c96ff7..58b4b2eba 100644 --- a/pkg/plugin/task.go +++ b/pkg/plugin/task.go @@ -6,6 +6,12 @@ import ( "github.com/stashapp/stash/pkg/plugin/common" ) +type PluginTask struct { + Name string `json:"name"` + Description *string `json:"description"` + Plugin *Plugin `json:"plugin"` +} + // Task is the interface that handles management of a single plugin task. type Task interface { // Start starts the plugin task. 
Returns an error if task could not be diff --git a/pkg/scene/caption_test.go b/pkg/scene/caption_test.go deleted file mode 100644 index 3c9cb54fb..000000000 --- a/pkg/scene/caption_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package scene - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -var testExts = []string{"mkv", "mp4"} - -type testCase struct { - captionPath string - expectedLang string - expectedCandidates []string -} - -var testCases = []testCase{ - { - captionPath: "/stash/video.vtt", - expectedLang: LangUnknown, - expectedCandidates: []string{"/stash/video.mkv", "/stash/video.mp4"}, - }, - { - captionPath: "/stash/video.en.vtt", - expectedLang: "en", - expectedCandidates: []string{"/stash/video.mkv", "/stash/video.mp4"}, // lang code valid, remove en part - }, - { - captionPath: "/stash/video.test.srt", - expectedLang: LangUnknown, - expectedCandidates: []string{"/stash/video.test.mkv", "/stash/video.test.mp4"}, // no lang code/lang code invalid test should remain - }, - { - captionPath: "C:\\videos\\video.fr.srt", - expectedLang: "fr", - expectedCandidates: []string{"C:\\videos\\video.mkv", "C:\\videos\\video.mp4"}, - }, - { - captionPath: "C:\\videos\\video.xx.srt", - expectedLang: LangUnknown, - expectedCandidates: []string{"C:\\videos\\video.xx.mkv", "C:\\videos\\video.xx.mp4"}, // no lang code/lang code invalid xx should remain - }, -} - -func TestGenerateCaptionCandidates(t *testing.T) { - for _, c := range testCases { - assert.ElementsMatch(t, c.expectedCandidates, GenerateCaptionCandidates(c.captionPath, testExts)) - } -} - -func TestGetCaptionsLangFromPath(t *testing.T) { - for _, l := range testCases { - assert.Equal(t, l.expectedLang, GetCaptionsLangFromPath(l.captionPath)) - } -} diff --git a/pkg/scene/delete.go b/pkg/scene/delete.go index 3a31d6f60..47449f1e3 100644 --- a/pkg/scene/delete.go +++ b/pkg/scene/delete.go @@ -1,9 +1,11 @@ package scene import ( + "context" "path/filepath" "github.com/stashapp/stash/pkg/file" + "github.com/stashapp/stash/pkg/file/video" "github.com/stashapp/stash/pkg/fsutil" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models/paths" @@ -11,7 +13,7 @@ import ( // FileDeleter is an extension of file.Deleter that handles deletion of scene files. type FileDeleter struct { - file.Deleter + *file.Deleter FileNamingAlgo models.HashAlgorithm Paths *paths.Paths @@ -114,35 +116,34 @@ func (d *FileDeleter) MarkMarkerFiles(scene *models.Scene, seconds int) error { return d.Files(files) } +type Destroyer interface { + Destroy(ctx context.Context, id int) error +} + +type MarkerDestroyer interface { + FindBySceneID(ctx context.Context, sceneID int) ([]*models.SceneMarker, error) + Destroy(ctx context.Context, id int) error +} + // Destroy deletes a scene and its associated relationships from the // database. 
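// ---- editor's sketch (not part of the patch): satisfying the narrow interfaces ----
// Destroyer and MarkerDestroyer above replace the broad models.Repository
// dependency, so callers and tests only need to supply the methods actually
// used. A compile-time check against a trivial stub; stubMarkerStore is a
// hypothetical test double:
//
//	type stubMarkerStore struct{}
//
//	func (stubMarkerStore) FindBySceneID(ctx context.Context, sceneID int) ([]*models.SceneMarker, error) {
//		return nil, nil // scene has no markers
//	}
//
//	func (stubMarkerStore) Destroy(ctx context.Context, id int) error {
//		return nil
//	}
//
//	var _ MarkerDestroyer = stubMarkerStore{}
//
// ---- end sketch ----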
-func Destroy(scene *models.Scene, repo models.Repository, fileDeleter *FileDeleter, deleteGenerated, deleteFile bool) error { - qb := repo.Scene() - mqb := repo.SceneMarker() - - markers, err := mqb.FindBySceneID(scene.ID) +func (s *Service) Destroy(ctx context.Context, scene *models.Scene, fileDeleter *FileDeleter, deleteGenerated, deleteFile bool) error { + mqb := s.MarkerDestroyer + markers, err := mqb.FindBySceneID(ctx, scene.ID) if err != nil { return err } for _, m := range markers { - if err := DestroyMarker(scene, m, mqb, fileDeleter); err != nil { + if err := DestroyMarker(ctx, scene, m, mqb, fileDeleter); err != nil { return err } } if deleteFile { - if err := fileDeleter.Files([]string{scene.Path}); err != nil { + if err := s.deleteFiles(ctx, scene, fileDeleter); err != nil { return err } - - funscriptPath := GetFunscriptPath(scene.Path) - funscriptExists, _ := fsutil.FileExists(funscriptPath) - if funscriptExists { - if err := fileDeleter.Files([]string{funscriptPath}); err != nil { - return err - } - } } if deleteGenerated { @@ -151,18 +152,56 @@ func Destroy(scene *models.Scene, repo models.Repository, fileDeleter *FileDelet } } - if err := qb.Destroy(scene.ID); err != nil { + if err := s.Repository.Destroy(ctx, scene.ID); err != nil { return err } return nil } +// deleteFiles deletes files from the database and file system +func (s *Service) deleteFiles(ctx context.Context, scene *models.Scene, fileDeleter *FileDeleter) error { + if err := scene.LoadFiles(ctx, s.Repository); err != nil { + return err + } + + for _, f := range scene.Files.List() { + // only delete files where there is no other associated scene + otherScenes, err := s.Repository.FindByFileID(ctx, f.ID) + if err != nil { + return err + } + + if len(otherScenes) > 1 { + // other scenes associated, don't remove + continue + } + + const deleteFile = true + if err := file.Destroy(ctx, s.File, f, fileDeleter.Deleter, deleteFile); err != nil { + return err + } + + // don't delete files in zip archives + if f.ZipFileID == nil { + funscriptPath := video.GetFunscriptPath(f.Path) + funscriptExists, _ := fsutil.FileExists(funscriptPath) + if funscriptExists { + if err := fileDeleter.Files([]string{funscriptPath}); err != nil { + return err + } + } + } + } + + return nil +} + // DestroyMarker deletes the scene marker from the database and uses the // provided FileDeleter to mark its generated files for removal once the // transaction is successfully committed.
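// ---- editor's sketch (not part of the patch): presumed Service wiring ----
// Destroy and deleteFiles above are methods on a Service type that this hunk
// does not show. From the fields they reference (s.Repository,
// s.MarkerDestroyer, s.File), its shape is presumably along these lines; the
// field types here are assumptions, not the actual declaration:
//
//	type Service struct {
//		File            file.Store      // file store passed to file.Destroy
//		Repository      Repository      // scene store: Destroy, FindByFileID, LoadFiles
//		MarkerDestroyer MarkerDestroyer // marker lookup and deletion
//	}
//
// ---- end sketch ----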
-func DestroyMarker(scene *models.Scene, sceneMarker *models.SceneMarker, qb models.SceneMarkerWriter, fileDeleter *FileDeleter) error { - if err := qb.Destroy(sceneMarker.ID); err != nil { +func DestroyMarker(ctx context.Context, scene *models.Scene, sceneMarker *models.SceneMarker, qb MarkerDestroyer, fileDeleter *FileDeleter) error { + if err := qb.Destroy(ctx, sceneMarker.ID); err != nil { return err } diff --git a/pkg/scene/export.go b/pkg/scene/export.go index c5bda2c47..343210fe6 100644 --- a/pkg/scene/export.go +++ b/pkg/scene/export.go @@ -1,6 +1,7 @@ package scene import ( + "context" "fmt" "math" "strconv" @@ -9,56 +10,57 @@ import ( "github.com/stashapp/stash/pkg/models/json" "github.com/stashapp/stash/pkg/models/jsonschema" "github.com/stashapp/stash/pkg/sliceutil/intslice" + "github.com/stashapp/stash/pkg/studio" + "github.com/stashapp/stash/pkg/tag" "github.com/stashapp/stash/pkg/utils" ) +type CoverGetter interface { + GetCover(ctx context.Context, sceneID int) ([]byte, error) +} + +type MarkerTagFinder interface { + tag.Finder + TagFinder + FindBySceneMarkerID(ctx context.Context, sceneMarkerID int) ([]*models.Tag, error) +} + +type MarkerFinder interface { + FindBySceneID(ctx context.Context, sceneID int) ([]*models.SceneMarker, error) +} + +type TagFinder interface { + FindBySceneID(ctx context.Context, sceneID int) ([]*models.Tag, error) +} + // ToBasicJSON converts a scene object into its JSON object equivalent. It // does not convert the relationships to other objects, with the exception // of cover image. -func ToBasicJSON(reader models.SceneReader, scene *models.Scene) (*jsonschema.Scene, error) { +func ToBasicJSON(ctx context.Context, reader CoverGetter, scene *models.Scene) (*jsonschema.Scene, error) { newSceneJSON := jsonschema.Scene{ - CreatedAt: json.JSONTime{Time: scene.CreatedAt.Timestamp}, - UpdatedAt: json.JSONTime{Time: scene.UpdatedAt.Timestamp}, + Title: scene.Title, + URL: scene.URL, + Details: scene.Details, + CreatedAt: json.JSONTime{Time: scene.CreatedAt}, + UpdatedAt: json.JSONTime{Time: scene.UpdatedAt}, } - if scene.Checksum.Valid { - newSceneJSON.Checksum = scene.Checksum.String + if scene.Date != nil { + newSceneJSON.Date = scene.Date.String() } - if scene.OSHash.Valid { - newSceneJSON.OSHash = scene.OSHash.String - } - - if scene.Phash.Valid { - newSceneJSON.Phash = utils.PhashToString(scene.Phash.Int64) - } - - if scene.Title.Valid { - newSceneJSON.Title = scene.Title.String - } - - if scene.URL.Valid { - newSceneJSON.URL = scene.URL.String - } - - if scene.Date.Valid { - newSceneJSON.Date = utils.GetYMDFromDatabaseDate(scene.Date.String) - } - - if scene.Rating.Valid { - newSceneJSON.Rating = int(scene.Rating.Int64) + if scene.Rating != nil { + newSceneJSON.Rating = *scene.Rating } newSceneJSON.Organized = scene.Organized newSceneJSON.OCounter = scene.OCounter - if scene.Details.Valid { - newSceneJSON.Details = scene.Details.String + for _, f := range scene.Files.List() { + newSceneJSON.Files = append(newSceneJSON.Files, f.Base().Path) } - newSceneJSON.File = getSceneFileJSON(scene) - - cover, err := reader.GetCover(scene.ID) + cover, err := reader.GetCover(ctx, scene.ID) if err != nil { return nil, fmt.Errorf("error getting scene cover: %v", err) } @@ -67,9 +69,8 @@ func ToBasicJSON(reader models.SceneReader, scene *models.Scene) (*jsonschema.Sc newSceneJSON.Cover = utils.GetBase64StringFromData(cover) } - stashIDs, _ := reader.GetStashIDs(scene.ID) var ret []models.StashID - for _, stashID := range stashIDs { + for _, stashID := range 
scene.StashIDs.List() { newJoin := models.StashID{ StashID: stashID.StashID, Endpoint: stashID.Endpoint, @@ -82,57 +83,58 @@ func ToBasicJSON(reader models.SceneReader, scene *models.Scene) (*jsonschema.Sc return &newSceneJSON, nil } -func getSceneFileJSON(scene *models.Scene) *jsonschema.SceneFile { - ret := &jsonschema.SceneFile{} +// func getSceneFileJSON(scene *models.Scene) *jsonschema.SceneFile { +// ret := &jsonschema.SceneFile{} - if scene.FileModTime.Valid { - ret.ModTime = json.JSONTime{Time: scene.FileModTime.Timestamp} - } +// TODO +// if scene.FileModTime != nil { +// ret.ModTime = json.JSONTime{Time: *scene.FileModTime} +// } - if scene.Size.Valid { - ret.Size = scene.Size.String - } +// if scene.Size != nil { +// ret.Size = *scene.Size +// } - if scene.Duration.Valid { - ret.Duration = getDecimalString(scene.Duration.Float64) - } +// if scene.Duration != nil { +// ret.Duration = getDecimalString(*scene.Duration) +// } - if scene.VideoCodec.Valid { - ret.VideoCodec = scene.VideoCodec.String - } +// if scene.VideoCodec != nil { +// ret.VideoCodec = *scene.VideoCodec +// } - if scene.AudioCodec.Valid { - ret.AudioCodec = scene.AudioCodec.String - } +// if scene.AudioCodec != nil { +// ret.AudioCodec = *scene.AudioCodec +// } - if scene.Format.Valid { - ret.Format = scene.Format.String - } +// if scene.Format != nil { +// ret.Format = *scene.Format +// } - if scene.Width.Valid { - ret.Width = int(scene.Width.Int64) - } +// if scene.Width != nil { +// ret.Width = *scene.Width +// } - if scene.Height.Valid { - ret.Height = int(scene.Height.Int64) - } +// if scene.Height != nil { +// ret.Height = *scene.Height +// } - if scene.Framerate.Valid { - ret.Framerate = getDecimalString(scene.Framerate.Float64) - } +// if scene.Framerate != nil { +// ret.Framerate = getDecimalString(*scene.Framerate) +// } - if scene.Bitrate.Valid { - ret.Bitrate = int(scene.Bitrate.Int64) - } +// if scene.Bitrate != nil { +// ret.Bitrate = int(*scene.Bitrate) +// } - return ret -} +// return ret +// } // GetStudioName returns the name of the provided scene's studio. It returns an // empty string if there is no studio assigned to the scene. -func GetStudioName(reader models.StudioReader, scene *models.Scene) (string, error) { - if scene.StudioID.Valid { - studio, err := reader.Find(int(scene.StudioID.Int64)) +func GetStudioName(ctx context.Context, reader studio.Finder, scene *models.Scene) (string, error) { + if scene.StudioID != nil { + studio, err := reader.Find(ctx, *scene.StudioID) if err != nil { return "", err } @@ -147,8 +149,8 @@ func GetStudioName(reader models.StudioReader, scene *models.Scene) (string, err // GetTagNames returns a slice of tag names corresponding to the provided // scene's tags. -func GetTagNames(reader models.TagReader, scene *models.Scene) ([]string, error) { - tags, err := reader.FindBySceneID(scene.ID) +func GetTagNames(ctx context.Context, reader TagFinder, scene *models.Scene) ([]string, error) { + tags, err := reader.FindBySceneID(ctx, scene.ID) if err != nil { return nil, fmt.Errorf("error getting scene tags: %v", err) } @@ -168,10 +170,10 @@ func getTagNames(tags []*models.Tag) []string { } // GetDependentTagIDs returns a slice of unique tag IDs that this scene references. 
-func GetDependentTagIDs(tags models.TagReader, markerReader models.SceneMarkerReader, scene *models.Scene) ([]int, error) { +func GetDependentTagIDs(ctx context.Context, tags MarkerTagFinder, markerReader MarkerFinder, scene *models.Scene) ([]int, error) { var ret []int - t, err := tags.FindBySceneID(scene.ID) + t, err := tags.FindBySceneID(ctx, scene.ID) if err != nil { return nil, err } @@ -180,14 +182,14 @@ func GetDependentTagIDs(tags models.TagReader, markerReader models.SceneMarkerRe ret = intslice.IntAppendUnique(ret, tt.ID) } - sm, err := markerReader.FindBySceneID(scene.ID) + sm, err := markerReader.FindBySceneID(ctx, scene.ID) if err != nil { return nil, err } for _, smm := range sm { ret = intslice.IntAppendUnique(ret, smm.PrimaryTagID) - smmt, err := tags.FindBySceneMarkerID(smm.ID) + smmt, err := tags.FindBySceneMarkerID(ctx, smm.ID) if err != nil { return nil, fmt.Errorf("invalid tags for scene marker: %v", err) } @@ -200,25 +202,28 @@ func GetDependentTagIDs(tags models.TagReader, markerReader models.SceneMarkerRe return ret, nil } +type MovieFinder interface { + Find(ctx context.Context, id int) (*models.Movie, error) +} + // GetSceneMoviesJSON returns a slice of SceneMovie JSON representation objects // corresponding to the provided scene's scene movie relationships. -func GetSceneMoviesJSON(movieReader models.MovieReader, sceneReader models.SceneReader, scene *models.Scene) ([]jsonschema.SceneMovie, error) { - sceneMovies, err := sceneReader.GetMovies(scene.ID) - if err != nil { - return nil, fmt.Errorf("error getting scene movies: %v", err) - } +func GetSceneMoviesJSON(ctx context.Context, movieReader MovieFinder, scene *models.Scene) ([]jsonschema.SceneMovie, error) { + sceneMovies := scene.Movies.List() var results []jsonschema.SceneMovie for _, sceneMovie := range sceneMovies { - movie, err := movieReader.Find(sceneMovie.MovieID) + movie, err := movieReader.Find(ctx, sceneMovie.MovieID) if err != nil { return nil, fmt.Errorf("error getting movie: %v", err) } if movie.Name.Valid { sceneMovieJSON := jsonschema.SceneMovie{ - MovieName: movie.Name.String, - SceneIndex: int(sceneMovie.SceneIndex.Int64), + MovieName: movie.Name.String, + } + if sceneMovie.SceneIndex != nil { + sceneMovieJSON.SceneIndex = *sceneMovie.SceneIndex } results = append(results, sceneMovieJSON) } @@ -228,14 +233,10 @@ func GetSceneMoviesJSON(movieReader models.MovieReader, sceneReader models.Scene } // GetDependentMovieIDs returns a slice of movie IDs that this scene references. -func GetDependentMovieIDs(sceneReader models.SceneReader, scene *models.Scene) ([]int, error) { +func GetDependentMovieIDs(ctx context.Context, scene *models.Scene) ([]int, error) { var ret []int - m, err := sceneReader.GetMovies(scene.ID) - if err != nil { - return nil, err - } - + m := scene.Movies.List() for _, mm := range m { ret = append(ret, mm.MovieID) } @@ -245,8 +246,8 @@ func GetDependentMovieIDs(sceneReader models.SceneReader, scene *models.Scene) ( // GetSceneMarkersJSON returns a slice of SceneMarker JSON representation // objects corresponding to the provided scene's markers. 
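// ---- editor's sketch (not part of the patch): preloading relationships ----
// GetSceneMoviesJSON and GetDependentMovieIDs above now read scene.Movies
// from the model instead of querying a reader, so the relationship must be
// populated before export. This assumes a LoadMovies helper analogous to the
// LoadFiles call used in deleteFiles earlier in this diff; repo and
// movieFinder are placeholder names:
//
//	if err := scene.LoadMovies(ctx, repo); err != nil {
//		return err
//	}
//	sceneMovies, err := GetSceneMoviesJSON(ctx, movieFinder, scene)
//
// ---- end sketch ----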
-func GetSceneMarkersJSON(markerReader models.SceneMarkerReader, tagReader models.TagReader, scene *models.Scene) ([]jsonschema.SceneMarker, error) { - sceneMarkers, err := markerReader.FindBySceneID(scene.ID) +func GetSceneMarkersJSON(ctx context.Context, markerReader MarkerFinder, tagReader MarkerTagFinder, scene *models.Scene) ([]jsonschema.SceneMarker, error) { + sceneMarkers, err := markerReader.FindBySceneID(ctx, scene.ID) if err != nil { return nil, fmt.Errorf("error getting scene markers: %v", err) } @@ -254,12 +255,12 @@ func GetSceneMarkersJSON(markerReader models.SceneMarkerReader, tagReader models var results []jsonschema.SceneMarker for _, sceneMarker := range sceneMarkers { - primaryTag, err := tagReader.Find(sceneMarker.PrimaryTagID) + primaryTag, err := tagReader.Find(ctx, sceneMarker.PrimaryTagID) if err != nil { return nil, fmt.Errorf("invalid primary tag for scene marker: %v", err) } - sceneMarkerTags, err := tagReader.FindBySceneMarkerID(sceneMarker.ID) + sceneMarkerTags, err := tagReader.FindBySceneMarkerID(ctx, sceneMarker.ID) if err != nil { return nil, fmt.Errorf("invalid tags for scene marker: %v", err) } diff --git a/pkg/scene/export_test.go b/pkg/scene/export_test.go index aa8b7fb52..623e399a1 100644 --- a/pkg/scene/export_test.go +++ b/pkg/scene/export_test.go @@ -1,14 +1,13 @@ package scene import ( - "database/sql" "errors" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models/json" "github.com/stashapp/stash/pkg/models/jsonschema" "github.com/stashapp/stash/pkg/models/mocks" - "github.com/stashapp/stash/pkg/utils" "github.com/stretchr/testify/assert" "testing" @@ -24,14 +23,10 @@ const ( missingStudioID = 5 errStudioID = 6 - // noGalleryID = 7 - // errGalleryID = 8 - noTagsID = 11 errTagsID = 12 noMoviesID = 13 - errMoviesID = 14 errFindMovieID = 15 noMarkersID = 16 @@ -40,31 +35,18 @@ const ( errFindByMarkerID = 19 ) -const ( - url = "url" - checksum = "checksum" - oshash = "oshash" - title = "title" - phash = -3846826108889195 - date = "2001-01-01" - rating = 5 - ocounter = 2 - organized = true - details = "details" - size = "size" - duration = 1.23 - durationStr = "1.23" - videoCodec = "videoCodec" - audioCodec = "audioCodec" - format = "format" - width = 100 - height = 100 - framerate = 3.21 - framerateStr = "3.21" - bitrate = 1 +var ( + url = "url" + title = "title" + date = "2001-01-01" + dateObj = models.NewDate(date) + rating = 5 + ocounter = 2 + organized = true + details = "details" ) -const ( +var ( studioName = "studioName" // galleryChecksum = "galleryChecksum" @@ -90,11 +72,11 @@ var stashID = models.StashID{ StashID: "StashID", Endpoint: "Endpoint", } -var stashIDs = []*models.StashID{ - &stashID, -} -const imageBase64 = "aW1hZ2VCeXRlcw==" +const ( + path = "path" + imageBase64 = "aW1hZ2VCeXRlcw==" +) var ( createTime = time.Date(2001, 01, 01, 0, 0, 0, 0, time.UTC) @@ -103,79 +85,55 @@ var ( func createFullScene(id int) models.Scene { return models.Scene{ - ID: id, - Title: models.NullString(title), - AudioCodec: models.NullString(audioCodec), - Bitrate: models.NullInt64(bitrate), - Checksum: models.NullString(checksum), - Date: models.SQLiteDate{ - String: date, - Valid: true, - }, - Details: models.NullString(details), - Duration: sql.NullFloat64{ - Float64: duration, - Valid: true, - }, - Format: models.NullString(format), - Framerate: sql.NullFloat64{ - Float64: framerate, - Valid: true, - }, - Height: models.NullInt64(height), - OCounter: ocounter, - OSHash: 
models.NullString(oshash), - Phash: models.NullInt64(phash), - Rating: models.NullInt64(rating), - Organized: organized, - Size: models.NullString(size), - VideoCodec: models.NullString(videoCodec), - Width: models.NullInt64(width), - URL: models.NullString(url), - CreatedAt: models.SQLiteTimestamp{ - Timestamp: createTime, - }, - UpdatedAt: models.SQLiteTimestamp{ - Timestamp: updateTime, - }, + ID: id, + Title: title, + Date: &dateObj, + Details: details, + OCounter: ocounter, + Rating: &rating, + Organized: organized, + URL: url, + Files: models.NewRelatedVideoFiles([]*file.VideoFile{ + { + BaseFile: &file.BaseFile{ + Path: path, + }, + }, + }), + StashIDs: models.NewRelatedStashIDs([]models.StashID{ + stashID, + }), + CreatedAt: createTime, + UpdatedAt: updateTime, } } func createEmptyScene(id int) models.Scene { return models.Scene{ ID: id, - CreatedAt: models.SQLiteTimestamp{ - Timestamp: createTime, - }, - UpdatedAt: models.SQLiteTimestamp{ - Timestamp: updateTime, - }, + Files: models.NewRelatedVideoFiles([]*file.VideoFile{ + { + BaseFile: &file.BaseFile{ + Path: path, + }, + }, + }), + StashIDs: models.NewRelatedStashIDs([]models.StashID{}), + CreatedAt: createTime, + UpdatedAt: updateTime, } } func createFullJSONScene(image string) *jsonschema.Scene { return &jsonschema.Scene{ Title: title, - Checksum: checksum, + Files: []string{path}, Date: date, Details: details, OCounter: ocounter, - OSHash: oshash, - Phash: utils.PhashToString(phash), Rating: rating, Organized: organized, URL: url, - File: &jsonschema.SceneFile{ - AudioCodec: audioCodec, - Bitrate: bitrate, - Duration: durationStr, - Format: format, - Framerate: framerateStr, - Height: height, - Size: size, - VideoCodec: videoCodec, - Width: width, - }, CreatedAt: json.JSONTime{ Time: createTime, }, @@ -191,7 +149,7 @@ func createFullJSONScene(image string) *jsonschema.Scene { func createEmptyJSONScene() *jsonschema.Scene { return &jsonschema.Scene{ - File: &jsonschema.SceneFile{}, + Files: []string{path}, CreatedAt: json.JSONTime{ Time: createTime, }, @@ -230,16 +188,13 @@ func TestToJSON(t *testing.T) { imageErr := errors.New("error getting image") - mockSceneReader.On("GetCover", sceneID).Return(imageBytes, nil).Once() - mockSceneReader.On("GetCover", noImageID).Return(nil, nil).Once() - mockSceneReader.On("GetCover", errImageID).Return(nil, imageErr).Once() - - mockSceneReader.On("GetStashIDs", sceneID).Return(stashIDs, nil).Once() - mockSceneReader.On("GetStashIDs", noImageID).Return(nil, nil).Once() + mockSceneReader.On("GetCover", testCtx, sceneID).Return(imageBytes, nil).Once() + mockSceneReader.On("GetCover", testCtx, noImageID).Return(nil, nil).Once() + mockSceneReader.On("GetCover", testCtx, errImageID).Return(nil, imageErr).Once() for i, s := range scenarios { scene := s.input - json, err := ToBasicJSON(mockSceneReader, &scene) + json, err := ToBasicJSON(testCtx, mockSceneReader, &scene) switch { case !s.err && err != nil: @@ -256,7 +211,7 @@ func TestToJSON(t *testing.T) { func createStudioScene(studioID int) models.Scene { return models.Scene{ - StudioID: models.NullInt64(int64(studioID)), + StudioID: &studioID, } } @@ -289,15 +244,15 @@ func TestGetStudioName(t *testing.T) { studioErr := errors.New("error getting image") - mockStudioReader.On("Find", studioID).Return(&models.Studio{ + mockStudioReader.On("Find", testCtx, studioID).Return(&models.Studio{ Name: models.NullString(studioName), }, nil).Once() - mockStudioReader.On("Find", missingStudioID).Return(nil, nil).Once() - mockStudioReader.On("Find", 
errStudioID).Return(nil, studioErr).Once() + mockStudioReader.On("Find", testCtx, missingStudioID).Return(nil, nil).Once() + mockStudioReader.On("Find", testCtx, errStudioID).Return(nil, studioErr).Once() for i, s := range getStudioScenarios { scene := s.input - json, err := GetStudioName(mockStudioReader, &scene) + json, err := GetStudioName(testCtx, mockStudioReader, &scene) switch { case !s.err && err != nil: @@ -352,13 +307,13 @@ func TestGetTagNames(t *testing.T) { tagErr := errors.New("error getting tag") - mockTagReader.On("FindBySceneID", sceneID).Return(getTags(names), nil).Once() - mockTagReader.On("FindBySceneID", noTagsID).Return(nil, nil).Once() - mockTagReader.On("FindBySceneID", errTagsID).Return(nil, tagErr).Once() + mockTagReader.On("FindBySceneID", testCtx, sceneID).Return(getTags(names), nil).Once() + mockTagReader.On("FindBySceneID", testCtx, noTagsID).Return(nil, nil).Once() + mockTagReader.On("FindBySceneID", testCtx, errTagsID).Return(nil, tagErr).Once() for i, s := range getTagNamesScenarios { scene := s.input - json, err := GetTagNames(mockTagReader, &scene) + json, err := GetTagNames(testCtx, mockTagReader, &scene) switch { case !s.err && err != nil: @@ -379,9 +334,30 @@ type sceneMoviesTestScenario struct { err bool } +var validMovies = models.NewRelatedMovies([]models.MoviesScenes{ + { + MovieID: validMovie1, + SceneIndex: &movie1Scene, + }, + { + MovieID: validMovie2, + SceneIndex: &movie2Scene, + }, +}) + +var invalidMovies = models.NewRelatedMovies([]models.MoviesScenes{ + { + MovieID: invalidMovie, + SceneIndex: &movie1Scene, + }, +}) + var getSceneMoviesJSONScenarios = []sceneMoviesTestScenario{ { - createEmptyScene(sceneID), + models.Scene{ + ID: sceneID, + Movies: validMovies, + }, []jsonschema.SceneMovie{ { MovieName: movie1Name, @@ -395,63 +371,38 @@ var getSceneMoviesJSONScenarios = []sceneMoviesTestScenario{ false, }, { - createEmptyScene(noMoviesID), + models.Scene{ + ID: noMoviesID, + Movies: models.NewRelatedMovies([]models.MoviesScenes{}), + }, nil, false, }, { - createEmptyScene(errMoviesID), + models.Scene{ + ID: errFindMovieID, + Movies: invalidMovies, + }, nil, true, }, - { - createEmptyScene(errFindMovieID), - nil, - true, - }, -} - -var validMovies = []models.MoviesScenes{ - { - MovieID: validMovie1, - SceneIndex: models.NullInt64(movie1Scene), - }, - { - MovieID: validMovie2, - SceneIndex: models.NullInt64(movie2Scene), - }, -} - -var invalidMovies = []models.MoviesScenes{ - { - MovieID: invalidMovie, - SceneIndex: models.NullInt64(movie1Scene), - }, } func TestGetSceneMoviesJSON(t *testing.T) { mockMovieReader := &mocks.MovieReaderWriter{} - mockSceneReader := &mocks.SceneReaderWriter{} - - joinErr := errors.New("error getting scene movies") movieErr := errors.New("error getting movie") - mockSceneReader.On("GetMovies", sceneID).Return(validMovies, nil).Once() - mockSceneReader.On("GetMovies", noMoviesID).Return(nil, nil).Once() - mockSceneReader.On("GetMovies", errMoviesID).Return(nil, joinErr).Once() - mockSceneReader.On("GetMovies", errFindMovieID).Return(invalidMovies, nil).Once() - - mockMovieReader.On("Find", validMovie1).Return(&models.Movie{ + mockMovieReader.On("Find", testCtx, validMovie1).Return(&models.Movie{ Name: models.NullString(movie1Name), }, nil).Once() - mockMovieReader.On("Find", validMovie2).Return(&models.Movie{ + mockMovieReader.On("Find", testCtx, validMovie2).Return(&models.Movie{ Name: models.NullString(movie2Name), }, nil).Once() - mockMovieReader.On("Find", invalidMovie).Return(nil, movieErr).Once() + 
mockMovieReader.On("Find", testCtx, invalidMovie).Return(nil, movieErr).Once() for i, s := range getSceneMoviesJSONScenarios { scene := s.input - json, err := GetSceneMoviesJSON(mockMovieReader, mockSceneReader, &scene) + json, err := GetSceneMoviesJSON(testCtx, mockMovieReader, &scene) switch { case !s.err && err != nil: @@ -603,21 +554,21 @@ func TestGetSceneMarkersJSON(t *testing.T) { markersErr := errors.New("error getting scene markers") tagErr := errors.New("error getting tags") - mockMarkerReader.On("FindBySceneID", sceneID).Return(validMarkers, nil).Once() - mockMarkerReader.On("FindBySceneID", noMarkersID).Return(nil, nil).Once() - mockMarkerReader.On("FindBySceneID", errMarkersID).Return(nil, markersErr).Once() - mockMarkerReader.On("FindBySceneID", errFindPrimaryTagID).Return(invalidMarkers1, nil).Once() - mockMarkerReader.On("FindBySceneID", errFindByMarkerID).Return(invalidMarkers2, nil).Once() + mockMarkerReader.On("FindBySceneID", testCtx, sceneID).Return(validMarkers, nil).Once() + mockMarkerReader.On("FindBySceneID", testCtx, noMarkersID).Return(nil, nil).Once() + mockMarkerReader.On("FindBySceneID", testCtx, errMarkersID).Return(nil, markersErr).Once() + mockMarkerReader.On("FindBySceneID", testCtx, errFindPrimaryTagID).Return(invalidMarkers1, nil).Once() + mockMarkerReader.On("FindBySceneID", testCtx, errFindByMarkerID).Return(invalidMarkers2, nil).Once() - mockTagReader.On("Find", validTagID1).Return(&models.Tag{ + mockTagReader.On("Find", testCtx, validTagID1).Return(&models.Tag{ Name: validTagName1, }, nil) - mockTagReader.On("Find", validTagID2).Return(&models.Tag{ + mockTagReader.On("Find", testCtx, validTagID2).Return(&models.Tag{ Name: validTagName2, }, nil) - mockTagReader.On("Find", invalidTagID).Return(nil, tagErr) + mockTagReader.On("Find", testCtx, invalidTagID).Return(nil, tagErr) - mockTagReader.On("FindBySceneMarkerID", validMarkerID1).Return([]*models.Tag{ + mockTagReader.On("FindBySceneMarkerID", testCtx, validMarkerID1).Return([]*models.Tag{ { Name: validTagName1, }, @@ -625,16 +576,16 @@ func TestGetSceneMarkersJSON(t *testing.T) { Name: validTagName2, }, }, nil) - mockTagReader.On("FindBySceneMarkerID", validMarkerID2).Return([]*models.Tag{ + mockTagReader.On("FindBySceneMarkerID", testCtx, validMarkerID2).Return([]*models.Tag{ { Name: validTagName2, }, }, nil) - mockTagReader.On("FindBySceneMarkerID", invalidMarkerID2).Return(nil, tagErr).Once() + mockTagReader.On("FindBySceneMarkerID", testCtx, invalidMarkerID2).Return(nil, tagErr).Once() for i, s := range getSceneMarkersJSONScenarios { scene := s.input - json, err := GetSceneMarkersJSON(mockMarkerReader, mockTagReader, &scene) + json, err := GetSceneMarkersJSON(testCtx, mockMarkerReader, mockTagReader, &scene) switch { case !s.err && err != nil: diff --git a/pkg/scene/generate/screenshot.go b/pkg/scene/generate/screenshot.go index 0ab0c6006..41ecc8fe8 100644 --- a/pkg/scene/generate/screenshot.go +++ b/pkg/scene/generate/screenshot.go @@ -32,6 +32,8 @@ func (g Generator) Screenshot(ctx context.Context, input string, hash string, vi } } + logger.Infof("Creating screenshot for %s", input) + at := screenshotDurationProportion * videoDuration if options.At != nil { at = *options.At diff --git a/pkg/scene/import.go b/pkg/scene/import.go index 103be88fd..79d95aa04 100644 --- a/pkg/scene/import.go +++ b/pkg/scene/import.go @@ -1,58 +1,69 @@ package scene import ( - "database/sql" + "context" "fmt" - "strconv" "strings" + "github.com/stashapp/stash/pkg/file" + "github.com/stashapp/stash/pkg/gallery" 
"github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models/jsonschema" + "github.com/stashapp/stash/pkg/movie" + "github.com/stashapp/stash/pkg/performer" "github.com/stashapp/stash/pkg/sliceutil/stringslice" + "github.com/stashapp/stash/pkg/studio" + "github.com/stashapp/stash/pkg/tag" "github.com/stashapp/stash/pkg/utils" ) +type FullCreatorUpdater interface { + CreatorUpdater + Update(ctx context.Context, updatedScene *models.Scene) error + Updater +} + type Importer struct { - ReaderWriter models.SceneReaderWriter - StudioWriter models.StudioReaderWriter - GalleryWriter models.GalleryReaderWriter - PerformerWriter models.PerformerReaderWriter - MovieWriter models.MovieReaderWriter - TagWriter models.TagReaderWriter + ReaderWriter FullCreatorUpdater + FileFinder file.Getter + StudioWriter studio.NameFinderCreator + GalleryFinder gallery.Finder + PerformerWriter performer.NameFinderCreator + MovieWriter movie.NameFinderCreator + TagWriter tag.NameFinderCreator Input jsonschema.Scene - Path string MissingRefBehaviour models.ImportMissingRefEnum FileNamingAlgorithm models.HashAlgorithm ID int scene models.Scene - galleries []*models.Gallery - performers []*models.Performer - movies []models.MoviesScenes - tags []*models.Tag coverImageData []byte } -func (i *Importer) PreImport() error { +func (i *Importer) PreImport(ctx context.Context) error { i.scene = i.sceneJSONToScene(i.Input) - if err := i.populateStudio(); err != nil { + if err := i.populateFiles(ctx); err != nil { return err } - if err := i.populateGalleries(); err != nil { + if err := i.populateStudio(ctx); err != nil { return err } - if err := i.populatePerformers(); err != nil { + if err := i.populateGalleries(ctx); err != nil { return err } - if err := i.populateTags(); err != nil { + if err := i.populatePerformers(ctx); err != nil { return err } - if err := i.populateMovies(); err != nil { + if err := i.populateTags(ctx); err != nil { + return err + } + + if err := i.populateMovies(ctx); err != nil { return err } @@ -69,75 +80,58 @@ func (i *Importer) PreImport() error { func (i *Importer) sceneJSONToScene(sceneJSON jsonschema.Scene) models.Scene { newScene := models.Scene{ - Checksum: sql.NullString{String: sceneJSON.Checksum, Valid: sceneJSON.Checksum != ""}, - OSHash: sql.NullString{String: sceneJSON.OSHash, Valid: sceneJSON.OSHash != ""}, - Path: i.Path, + // Path: i.Path, + Title: sceneJSON.Title, + Details: sceneJSON.Details, + URL: sceneJSON.URL, + PerformerIDs: models.NewRelatedIDs([]int{}), + TagIDs: models.NewRelatedIDs([]int{}), + GalleryIDs: models.NewRelatedIDs([]int{}), + Movies: models.NewRelatedMovies([]models.MoviesScenes{}), + StashIDs: models.NewRelatedStashIDs(sceneJSON.StashIDs), } - if sceneJSON.Phash != "" { - hash, err := strconv.ParseUint(sceneJSON.Phash, 16, 64) - newScene.Phash = sql.NullInt64{Int64: int64(hash), Valid: err == nil} - } - - if sceneJSON.Title != "" { - newScene.Title = sql.NullString{String: sceneJSON.Title, Valid: true} - } - if sceneJSON.Details != "" { - newScene.Details = sql.NullString{String: sceneJSON.Details, Valid: true} - } - if sceneJSON.URL != "" { - newScene.URL = sql.NullString{String: sceneJSON.URL, Valid: true} - } if sceneJSON.Date != "" { - newScene.Date = models.SQLiteDate{String: sceneJSON.Date, Valid: true} + d := models.NewDate(sceneJSON.Date) + newScene.Date = &d } if sceneJSON.Rating != 0 { - newScene.Rating = sql.NullInt64{Int64: int64(sceneJSON.Rating), Valid: true} + newScene.Rating = &sceneJSON.Rating } newScene.Organized = 
sceneJSON.Organized newScene.OCounter = sceneJSON.OCounter - newScene.CreatedAt = models.SQLiteTimestamp{Timestamp: sceneJSON.CreatedAt.GetTime()} - newScene.UpdatedAt = models.SQLiteTimestamp{Timestamp: sceneJSON.UpdatedAt.GetTime()} - - if sceneJSON.File != nil { - if sceneJSON.File.Size != "" { - newScene.Size = sql.NullString{String: sceneJSON.File.Size, Valid: true} - } - if sceneJSON.File.Duration != "" { - duration, _ := strconv.ParseFloat(sceneJSON.File.Duration, 64) - newScene.Duration = sql.NullFloat64{Float64: duration, Valid: true} - } - if sceneJSON.File.VideoCodec != "" { - newScene.VideoCodec = sql.NullString{String: sceneJSON.File.VideoCodec, Valid: true} - } - if sceneJSON.File.AudioCodec != "" { - newScene.AudioCodec = sql.NullString{String: sceneJSON.File.AudioCodec, Valid: true} - } - if sceneJSON.File.Format != "" { - newScene.Format = sql.NullString{String: sceneJSON.File.Format, Valid: true} - } - if sceneJSON.File.Width != 0 { - newScene.Width = sql.NullInt64{Int64: int64(sceneJSON.File.Width), Valid: true} - } - if sceneJSON.File.Height != 0 { - newScene.Height = sql.NullInt64{Int64: int64(sceneJSON.File.Height), Valid: true} - } - if sceneJSON.File.Framerate != "" { - framerate, _ := strconv.ParseFloat(sceneJSON.File.Framerate, 64) - newScene.Framerate = sql.NullFloat64{Float64: framerate, Valid: true} - } - if sceneJSON.File.Bitrate != 0 { - newScene.Bitrate = sql.NullInt64{Int64: int64(sceneJSON.File.Bitrate), Valid: true} - } - } + newScene.CreatedAt = sceneJSON.CreatedAt.GetTime() + newScene.UpdatedAt = sceneJSON.UpdatedAt.GetTime() return newScene } -func (i *Importer) populateStudio() error { +func (i *Importer) populateFiles(ctx context.Context) error { + files := make([]*file.VideoFile, 0) + + for _, ref := range i.Input.Files { + path := ref + f, err := i.FileFinder.FindByPath(ctx, path) + if err != nil { + return fmt.Errorf("error finding file: %w", err) + } + + if f == nil { + return fmt.Errorf("scene file '%s' not found", path) + } else { + files = append(files, f.(*file.VideoFile)) + } + } + + i.scene.Files = models.NewRelatedVideoFiles(files) + + return nil +} + +func (i *Importer) populateStudio(ctx context.Context) error { if i.Input.Studio != "" { - studio, err := i.StudioWriter.FindByName(i.Input.Studio, false) + studio, err := i.StudioWriter.FindByName(ctx, i.Input.Studio, false) if err != nil { return fmt.Errorf("error finding studio by name: %v", err) } @@ -152,27 +146,24 @@ func (i *Importer) populateStudio() error { } if i.MissingRefBehaviour == models.ImportMissingRefEnumCreate { - studioID, err := i.createStudio(i.Input.Studio) + studioID, err := i.createStudio(ctx, i.Input.Studio) if err != nil { return err } - i.scene.StudioID = sql.NullInt64{ - Int64: int64(studioID), - Valid: true, - } + i.scene.StudioID = &studioID } } else { - i.scene.StudioID = sql.NullInt64{Int64: int64(studio.ID), Valid: true} + i.scene.StudioID = &studio.ID } } return nil } -func (i *Importer) createStudio(name string) (int, error) { +func (i *Importer) createStudio(ctx context.Context, name string) (int, error) { newStudio := *models.NewStudio(name) - created, err := i.StudioWriter.Create(newStudio) + created, err := i.StudioWriter.Create(ctx, newStudio) if err != nil { return 0, err } @@ -180,41 +171,60 @@ func (i *Importer) createStudio(name string) (int, error) { return created.ID, nil } -func (i *Importer) populateGalleries() error { - if len(i.Input.Galleries) > 0 { - checksums := i.Input.Galleries - galleries, err := 
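// ---- editor's sketch (not part of the patch): the new import shape ----
// populateFiles above resolves each exported path to an existing file row
// via FileFinder.FindByPath — the importer links files, it does not create
// them. A minimal input for the new Importer might look like this
// (illustrative values only):
//
//	sceneJSON := jsonschema.Scene{
//		Title: "example scene",
//		Files: []string{"/stash/videos/example.mp4"},
//	}
//	importer := Importer{Input: sceneJSON /* plus the finder/writer fields */}
//	err := importer.PreImport(ctx) // fails if a referenced file is not found
//
// ---- end sketch ----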
i.GalleryWriter.FindByChecksums(checksums) +func (i *Importer) locateGallery(ctx context.Context, ref jsonschema.GalleryRef) (*models.Gallery, error) { + var galleries []*models.Gallery + var err error + switch { + case ref.FolderPath != "": + galleries, err = i.GalleryFinder.FindByPath(ctx, ref.FolderPath) + case len(ref.ZipFiles) > 0: + for _, p := range ref.ZipFiles { + galleries, err = i.GalleryFinder.FindByPath(ctx, p) + if err != nil { + break + } + + if len(galleries) > 0 { + break + } + } + case ref.Title != "": + galleries, err = i.GalleryFinder.FindUserGalleryByTitle(ctx, ref.Title) + } + + var ret *models.Gallery + if len(galleries) > 0 { + ret = galleries[0] + } + + return ret, err +} + +func (i *Importer) populateGalleries(ctx context.Context) error { + for _, ref := range i.Input.Galleries { + gallery, err := i.locateGallery(ctx, ref) if err != nil { return err } - var pluckedChecksums []string - for _, gallery := range galleries { - pluckedChecksums = append(pluckedChecksums, gallery.Checksum) - } - - missingGalleries := stringslice.StrFilter(checksums, func(checksum string) bool { - return !stringslice.StrInclude(pluckedChecksums, checksum) - }) - - if len(missingGalleries) > 0 { + if gallery == nil { if i.MissingRefBehaviour == models.ImportMissingRefEnumFail { - return fmt.Errorf("scene galleries [%s] not found", strings.Join(missingGalleries, ", ")) + return fmt.Errorf("scene gallery '%s' not found", ref.String()) } // we don't create galleries - just ignore + } else { + i.scene.GalleryIDs.Add(gallery.ID) } - - i.galleries = galleries } return nil } -func (i *Importer) populatePerformers() error { +func (i *Importer) populatePerformers(ctx context.Context) error { if len(i.Input.Performers) > 0 { names := i.Input.Performers - performers, err := i.PerformerWriter.FindByNames(names, false) + performers, err := i.PerformerWriter.FindByNames(ctx, names, false) if err != nil { return err } @@ -237,7 +247,7 @@ func (i *Importer) populatePerformers() error { } if i.MissingRefBehaviour == models.ImportMissingRefEnumCreate { - createdPerformers, err := i.createPerformers(missingPerformers) + createdPerformers, err := i.createPerformers(ctx, missingPerformers) if err != nil { return fmt.Errorf("error creating scene performers: %v", err) } @@ -248,18 +258,20 @@ func (i *Importer) populatePerformers() error { // ignore if MissingRefBehaviour set to Ignore } - i.performers = performers + for _, p := range performers { + i.scene.PerformerIDs.Add(p.ID) + } } return nil } -func (i *Importer) createPerformers(names []string) ([]*models.Performer, error) { +func (i *Importer) createPerformers(ctx context.Context, names []string) ([]*models.Performer, error) { var ret []*models.Performer for _, name := range names { newPerformer := *models.NewPerformer(name) - created, err := i.PerformerWriter.Create(newPerformer) + created, err := i.PerformerWriter.Create(ctx, newPerformer) if err != nil { return nil, err } @@ -270,10 +282,10 @@ func (i *Importer) createPerformers(names []string) ([]*models.Performer, error) return ret, nil } -func (i *Importer) populateMovies() error { +func (i *Importer) populateMovies(ctx context.Context) error { if len(i.Input.Movies) > 0 { for _, inputMovie := range i.Input.Movies { - movie, err := i.MovieWriter.FindByName(inputMovie.MovieName, false) + movie, err := i.MovieWriter.FindByName(ctx, inputMovie.MovieName, false) if err != nil { return fmt.Errorf("error finding scene movie: %v", err) } @@ -284,7 +296,7 @@ func (i *Importer) populateMovies() error { } if 
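// ---- editor's sketch (not part of the patch): gallery reference resolution ----
// locateGallery above resolves a reference by trying its fields in priority
// order: FolderPath first, then each ZipFiles path, then a user-gallery
// Title; only the first match is linked. Illustrative values only:
//
//	ref := jsonschema.GalleryRef{Title: "holiday album"}
//	g, err := i.locateGallery(ctx, ref) // no FolderPath/ZipFiles set, so resolves by title
//
// ---- end sketch ----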
i.MissingRefBehaviour == models.ImportMissingRefEnumCreate { - movie, err = i.createMovie(inputMovie.MovieName) + movie, err = i.createMovie(ctx, inputMovie.MovieName) if err != nil { return fmt.Errorf("error creating scene movie: %v", err) } @@ -301,23 +313,21 @@ func (i *Importer) populateMovies() error { } if inputMovie.SceneIndex != 0 { - toAdd.SceneIndex = sql.NullInt64{ - Int64: int64(inputMovie.SceneIndex), - Valid: true, - } + index := inputMovie.SceneIndex + toAdd.SceneIndex = &index } - i.movies = append(i.movies, toAdd) + i.scene.Movies.Add(toAdd) } } return nil } -func (i *Importer) createMovie(name string) (*models.Movie, error) { +func (i *Importer) createMovie(ctx context.Context, name string) (*models.Movie, error) { newMovie := *models.NewMovie(name) - created, err := i.MovieWriter.Create(newMovie) + created, err := i.MovieWriter.Create(ctx, newMovie) if err != nil { return nil, err } @@ -325,131 +335,90 @@ func (i *Importer) createMovie(name string) (*models.Movie, error) { return created, nil } -func (i *Importer) populateTags() error { +func (i *Importer) populateTags(ctx context.Context) error { if len(i.Input.Tags) > 0 { - tags, err := importTags(i.TagWriter, i.Input.Tags, i.MissingRefBehaviour) + tags, err := importTags(ctx, i.TagWriter, i.Input.Tags, i.MissingRefBehaviour) if err != nil { return err } - i.tags = tags + for _, p := range tags { + i.scene.TagIDs.Add(p.ID) + } } return nil } -func (i *Importer) PostImport(id int) error { +func (i *Importer) PostImport(ctx context.Context, id int) error { if len(i.coverImageData) > 0 { - if err := i.ReaderWriter.UpdateCover(id, i.coverImageData); err != nil { + if err := i.ReaderWriter.UpdateCover(ctx, id, i.coverImageData); err != nil { return fmt.Errorf("error setting scene images: %v", err) } } - if len(i.galleries) > 0 { - var galleryIDs []int - for _, gallery := range i.galleries { - galleryIDs = append(galleryIDs, gallery.ID) - } - - if err := i.ReaderWriter.UpdateGalleries(id, galleryIDs); err != nil { - return fmt.Errorf("failed to associate galleries: %v", err) - } - } - - if len(i.performers) > 0 { - var performerIDs []int - for _, performer := range i.performers { - performerIDs = append(performerIDs, performer.ID) - } - - if err := i.ReaderWriter.UpdatePerformers(id, performerIDs); err != nil { - return fmt.Errorf("failed to associate performers: %v", err) - } - } - - if len(i.movies) > 0 { - for index := range i.movies { - i.movies[index].SceneID = id - } - if err := i.ReaderWriter.UpdateMovies(id, i.movies); err != nil { - return fmt.Errorf("failed to associate movies: %v", err) - } - } - - if len(i.tags) > 0 { - var tagIDs []int - for _, t := range i.tags { - tagIDs = append(tagIDs, t.ID) - } - if err := i.ReaderWriter.UpdateTags(id, tagIDs); err != nil { - return fmt.Errorf("failed to associate tags: %v", err) - } - } - - if len(i.Input.StashIDs) > 0 { - if err := i.ReaderWriter.UpdateStashIDs(id, i.Input.StashIDs); err != nil { - return fmt.Errorf("error setting stash id: %v", err) - } - } - return nil } func (i *Importer) Name() string { - return i.Path + if i.Input.Title != "" { + return i.Input.Title + } + + if len(i.Input.Files) > 0 { + return i.Input.Files[0] + } + + return "" } -func (i *Importer) FindExistingID() (*int, error) { - var existing *models.Scene +func (i *Importer) FindExistingID(ctx context.Context) (*int, error) { + var existing []*models.Scene var err error - switch i.FileNamingAlgorithm { - case models.HashAlgorithmMd5: - existing, err = 
i.ReaderWriter.FindByChecksum(i.Input.Checksum) - case models.HashAlgorithmOshash: - existing, err = i.ReaderWriter.FindByOSHash(i.Input.OSHash) - default: - panic("unknown file naming algorithm") - } + for _, f := range i.scene.Files.List() { + existing, err = i.ReaderWriter.FindByFileID(ctx, f.ID) + if err != nil { + return nil, err + } - if err != nil { - return nil, err - } - - if existing != nil { - id := existing.ID - return &id, nil + if len(existing) > 0 { + id := existing[0].ID + return &id, nil + } } return nil, nil } -func (i *Importer) Create() (*int, error) { - created, err := i.ReaderWriter.Create(i.scene) - if err != nil { +func (i *Importer) Create(ctx context.Context) (*int, error) { + var fileIDs []file.ID + for _, f := range i.scene.Files.List() { + fileIDs = append(fileIDs, f.Base().ID) + } + if err := i.ReaderWriter.Create(ctx, &i.scene, fileIDs); err != nil { return nil, fmt.Errorf("error creating scene: %v", err) } - id := created.ID + id := i.scene.ID i.ID = id return &id, nil } -func (i *Importer) Update(id int) error { +func (i *Importer) Update(ctx context.Context, id int) error { scene := i.scene scene.ID = id i.ID = id - _, err := i.ReaderWriter.UpdateFull(scene) - if err != nil { + if err := i.ReaderWriter.Update(ctx, &scene); err != nil { return fmt.Errorf("error updating existing scene: %v", err) } return nil } -func importTags(tagWriter models.TagReaderWriter, names []string, missingRefBehaviour models.ImportMissingRefEnum) ([]*models.Tag, error) { - tags, err := tagWriter.FindByNames(names, false) +func importTags(ctx context.Context, tagWriter tag.NameFinderCreator, names []string, missingRefBehaviour models.ImportMissingRefEnum) ([]*models.Tag, error) { + tags, err := tagWriter.FindByNames(ctx, names, false) if err != nil { return nil, err } @@ -469,7 +438,7 @@ func importTags(tagWriter models.TagReaderWriter, names []string, missingRefBeha } if missingRefBehaviour == models.ImportMissingRefEnumCreate { - createdTags, err := createTags(tagWriter, missingTags) + createdTags, err := createTags(ctx, tagWriter, missingTags) if err != nil { return nil, fmt.Errorf("error creating tags: %v", err) } @@ -483,12 +452,12 @@ func importTags(tagWriter models.TagReaderWriter, names []string, missingRefBeha return tags, nil } -func createTags(tagWriter models.TagWriter, names []string) ([]*models.Tag, error) { +func createTags(ctx context.Context, tagWriter tag.NameFinderCreator, names []string) ([]*models.Tag, error) { var ret []*models.Tag for _, name := range names { newTag := *models.NewTag(name) - created, err := tagWriter.Create(newTag) + created, err := tagWriter.Create(ctx, newTag) if err != nil { return nil, err } diff --git a/pkg/scene/import_test.go b/pkg/scene/import_test.go index 499f27299..5a5fd5026 100644 --- a/pkg/scene/import_test.go +++ b/pkg/scene/import_test.go @@ -1,6 +1,7 @@ package scene import ( + "context" "errors" "testing" @@ -13,15 +14,8 @@ import ( const invalidImage = "aW1hZ2VCeXRlcw&&" -const ( - path = "path" - - sceneNameErr = "sceneNameErr" - // existingSceneName = "existingSceneName" - - existingSceneID = 100 +var ( existingStudioID = 101 - existingGalleryID = 102 existingPerformerID = 103 existingMovieID = 104 existingTagID = 105 @@ -30,10 +24,6 @@ const ( existingStudioErr = "existingStudioErr" missingStudioName = "missingStudioName" - existingGalleryChecksum = "existingGalleryChecksum" - existingGalleryErr = "existingGalleryErr" - missingGalleryChecksum = "missingGalleryChecksum" - existingPerformerName = "existingPerformerName" 
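// ---- editor's sketch (not part of the patch): duplicate detection by file ----
// FindExistingID above no longer matches on checksum/oshash columns; a scene
// already exists if any of its resolved files is attached to a scene via
// FindByFileID. Sketch of the overall import flow, placeholder names
// throughout:
//
//	if err := importer.PreImport(ctx); err != nil { /* resolves files and refs */ }
//	id, err := importer.FindExistingID(ctx)
//	if id != nil {
//		err = importer.Update(ctx, *id) // reuse the existing scene row
//	} else {
//		_, err = importer.Create(ctx) // create the scene and attach its file IDs
//	}
//
// ---- end sketch ----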
existingPerformerErr = "existingPerformerErr" missingPerformerName = "missingPerformerName" @@ -45,64 +35,48 @@ const ( existingTagName = "existingTagName" existingTagErr = "existingTagErr" missingTagName = "missingTagName" - - errPerformersID = 200 - errGalleriesID = 201 - - missingChecksum = "missingChecksum" - missingOSHash = "missingOSHash" - errChecksum = "errChecksum" - errOSHash = "errOSHash" ) -func TestImporterName(t *testing.T) { - i := Importer{ - Path: path, - Input: jsonschema.Scene{}, - } - - assert.Equal(t, path, i.Name()) -} +var testCtx = context.Background() func TestImporterPreImport(t *testing.T) { i := Importer{ - Path: path, Input: jsonschema.Scene{ Cover: invalidImage, }, } - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) i.Input.Cover = imageBase64 - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) } func TestImporterPreImportWithStudio(t *testing.T) { studioReaderWriter := &mocks.StudioReaderWriter{} + testCtx := context.Background() i := Importer{ StudioWriter: studioReaderWriter, - Path: path, Input: jsonschema.Scene{ Studio: existingStudioName, }, } - studioReaderWriter.On("FindByName", existingStudioName, false).Return(&models.Studio{ + studioReaderWriter.On("FindByName", testCtx, existingStudioName, false).Return(&models.Studio{ ID: existingStudioID, }, nil).Once() - studioReaderWriter.On("FindByName", existingStudioErr, false).Return(nil, errors.New("FindByName error")).Once() + studioReaderWriter.On("FindByName", testCtx, existingStudioErr, false).Return(nil, errors.New("FindByName error")).Once() - err := i.PreImport() + err := i.PreImport(testCtx) assert.Nil(t, err) - assert.Equal(t, int64(existingStudioID), i.scene.StudioID.Int64) + assert.Equal(t, existingStudioID, *i.scene.StudioID) i.Input.Studio = existingStudioErr - err = i.PreImport() + err = i.PreImport(testCtx) assert.NotNil(t, err) studioReaderWriter.AssertExpectations(t) @@ -112,7 +86,6 @@ func TestImporterPreImportWithMissingStudio(t *testing.T) { studioReaderWriter := &mocks.StudioReaderWriter{} i := Importer{ - Path: path, StudioWriter: studioReaderWriter, Input: jsonschema.Scene{ Studio: missingStudioName, @@ -120,22 +93,22 @@ func TestImporterPreImportWithMissingStudio(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumFail, } - studioReaderWriter.On("FindByName", missingStudioName, false).Return(nil, nil).Times(3) - studioReaderWriter.On("Create", mock.AnythingOfType("models.Studio")).Return(&models.Studio{ + studioReaderWriter.On("FindByName", testCtx, missingStudioName, false).Return(nil, nil).Times(3) + studioReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Studio")).Return(&models.Studio{ ID: existingStudioID, }, nil) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumIgnore - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumCreate - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) - assert.Equal(t, int64(existingStudioID), i.scene.StudioID.Int64) + assert.Equal(t, existingStudioID, *i.scene.StudioID) studioReaderWriter.AssertExpectations(t) } @@ -145,90 +118,24 @@ func TestImporterPreImportWithMissingStudioCreateErr(t *testing.T) { i := Importer{ StudioWriter: studioReaderWriter, - Path: path, Input: jsonschema.Scene{ Studio: missingStudioName, }, MissingRefBehaviour: models.ImportMissingRefEnumCreate, } - studioReaderWriter.On("FindByName", 
missingStudioName, false).Return(nil, nil).Once() - studioReaderWriter.On("Create", mock.AnythingOfType("models.Studio")).Return(nil, errors.New("Create error")) + studioReaderWriter.On("FindByName", testCtx, missingStudioName, false).Return(nil, nil).Once() + studioReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Studio")).Return(nil, errors.New("Create error")) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) } -func TestImporterPreImportWithGallery(t *testing.T) { - galleryReaderWriter := &mocks.GalleryReaderWriter{} - - i := Importer{ - GalleryWriter: galleryReaderWriter, - Path: path, - MissingRefBehaviour: models.ImportMissingRefEnumFail, - Input: jsonschema.Scene{ - Galleries: []string{ - existingGalleryChecksum, - }, - }, - } - - galleryReaderWriter.On("FindByChecksums", []string{existingGalleryChecksum}).Return([]*models.Gallery{ - { - ID: existingGalleryID, - Checksum: existingGalleryChecksum, - }, - }, nil).Once() - - galleryReaderWriter.On("FindByChecksums", []string{existingGalleryErr}).Return(nil, errors.New("FindByChecksums error")).Once() - - err := i.PreImport() - assert.Nil(t, err) - assert.Equal(t, existingGalleryID, i.galleries[0].ID) - - i.Input.Galleries = []string{existingGalleryErr} - err = i.PreImport() - assert.NotNil(t, err) - - galleryReaderWriter.AssertExpectations(t) -} - -func TestImporterPreImportWithMissingGallery(t *testing.T) { - galleryReaderWriter := &mocks.GalleryReaderWriter{} - - i := Importer{ - Path: path, - GalleryWriter: galleryReaderWriter, - Input: jsonschema.Scene{ - Galleries: []string{ - missingGalleryChecksum, - }, - }, - MissingRefBehaviour: models.ImportMissingRefEnumFail, - } - - galleryReaderWriter.On("FindByChecksums", []string{missingGalleryChecksum}).Return(nil, nil).Times(3) - - err := i.PreImport() - assert.NotNil(t, err) - - i.MissingRefBehaviour = models.ImportMissingRefEnumIgnore - err = i.PreImport() - assert.Nil(t, err) - - i.MissingRefBehaviour = models.ImportMissingRefEnumCreate - err = i.PreImport() - assert.Nil(t, err) - - galleryReaderWriter.AssertExpectations(t) -} - func TestImporterPreImportWithPerformer(t *testing.T) { performerReaderWriter := &mocks.PerformerReaderWriter{} i := Importer{ PerformerWriter: performerReaderWriter, - Path: path, MissingRefBehaviour: models.ImportMissingRefEnumFail, Input: jsonschema.Scene{ Performers: []string{ @@ -237,20 +144,20 @@ func TestImporterPreImportWithPerformer(t *testing.T) { }, } - performerReaderWriter.On("FindByNames", []string{existingPerformerName}, false).Return([]*models.Performer{ + performerReaderWriter.On("FindByNames", testCtx, []string{existingPerformerName}, false).Return([]*models.Performer{ { ID: existingPerformerID, Name: models.NullString(existingPerformerName), }, }, nil).Once() - performerReaderWriter.On("FindByNames", []string{existingPerformerErr}, false).Return(nil, errors.New("FindByNames error")).Once() + performerReaderWriter.On("FindByNames", testCtx, []string{existingPerformerErr}, false).Return(nil, errors.New("FindByNames error")).Once() - err := i.PreImport() + err := i.PreImport(testCtx) assert.Nil(t, err) - assert.Equal(t, existingPerformerID, i.performers[0].ID) + assert.Equal(t, []int{existingPerformerID}, i.scene.PerformerIDs.List()) i.Input.Performers = []string{existingPerformerErr} - err = i.PreImport() + err = i.PreImport(testCtx) assert.NotNil(t, err) performerReaderWriter.AssertExpectations(t) @@ -260,7 +167,6 @@ func TestImporterPreImportWithMissingPerformer(t *testing.T) { performerReaderWriter 
:= &mocks.PerformerReaderWriter{} i := Importer{ - Path: path, PerformerWriter: performerReaderWriter, Input: jsonschema.Scene{ Performers: []string{ @@ -270,22 +176,22 @@ func TestImporterPreImportWithMissingPerformer(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumFail, } - performerReaderWriter.On("FindByNames", []string{missingPerformerName}, false).Return(nil, nil).Times(3) - performerReaderWriter.On("Create", mock.AnythingOfType("models.Performer")).Return(&models.Performer{ + performerReaderWriter.On("FindByNames", testCtx, []string{missingPerformerName}, false).Return(nil, nil).Times(3) + performerReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Performer")).Return(&models.Performer{ ID: existingPerformerID, }, nil) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumIgnore - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumCreate - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) - assert.Equal(t, existingPerformerID, i.performers[0].ID) + assert.Equal(t, []int{existingPerformerID}, i.scene.PerformerIDs.List()) performerReaderWriter.AssertExpectations(t) } @@ -295,7 +201,6 @@ func TestImporterPreImportWithMissingPerformerCreateErr(t *testing.T) { i := Importer{ PerformerWriter: performerReaderWriter, - Path: path, Input: jsonschema.Scene{ Performers: []string{ missingPerformerName, @@ -304,19 +209,19 @@ func TestImporterPreImportWithMissingPerformerCreateErr(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumCreate, } - performerReaderWriter.On("FindByNames", []string{missingPerformerName}, false).Return(nil, nil).Once() - performerReaderWriter.On("Create", mock.AnythingOfType("models.Performer")).Return(nil, errors.New("Create error")) + performerReaderWriter.On("FindByNames", testCtx, []string{missingPerformerName}, false).Return(nil, nil).Once() + performerReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Performer")).Return(nil, errors.New("Create error")) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) } func TestImporterPreImportWithMovie(t *testing.T) { movieReaderWriter := &mocks.MovieReaderWriter{} + testCtx := context.Background() i := Importer{ MovieWriter: movieReaderWriter, - Path: path, MissingRefBehaviour: models.ImportMissingRefEnumFail, Input: jsonschema.Scene{ Movies: []jsonschema.SceneMovie{ @@ -328,18 +233,18 @@ func TestImporterPreImportWithMovie(t *testing.T) { }, } - movieReaderWriter.On("FindByName", existingMovieName, false).Return(&models.Movie{ + movieReaderWriter.On("FindByName", testCtx, existingMovieName, false).Return(&models.Movie{ ID: existingMovieID, Name: models.NullString(existingMovieName), }, nil).Once() - movieReaderWriter.On("FindByName", existingMovieErr, false).Return(nil, errors.New("FindByName error")).Once() + movieReaderWriter.On("FindByName", testCtx, existingMovieErr, false).Return(nil, errors.New("FindByName error")).Once() - err := i.PreImport() + err := i.PreImport(testCtx) assert.Nil(t, err) - assert.Equal(t, existingMovieID, i.movies[0].MovieID) + assert.Equal(t, existingMovieID, i.scene.Movies.List()[0].MovieID) i.Input.Movies[0].MovieName = existingMovieErr - err = i.PreImport() + err = i.PreImport(testCtx) assert.NotNil(t, err) movieReaderWriter.AssertExpectations(t) @@ -347,9 +252,9 @@ func TestImporterPreImportWithMovie(t *testing.T) { func TestImporterPreImportWithMissingMovie(t 
*testing.T) { movieReaderWriter := &mocks.MovieReaderWriter{} + testCtx := context.Background() i := Importer{ - Path: path, MovieWriter: movieReaderWriter, Input: jsonschema.Scene{ Movies: []jsonschema.SceneMovie{ @@ -361,22 +266,22 @@ func TestImporterPreImportWithMissingMovie(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumFail, } - movieReaderWriter.On("FindByName", missingMovieName, false).Return(nil, nil).Times(3) - movieReaderWriter.On("Create", mock.AnythingOfType("models.Movie")).Return(&models.Movie{ + movieReaderWriter.On("FindByName", testCtx, missingMovieName, false).Return(nil, nil).Times(3) + movieReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Movie")).Return(&models.Movie{ ID: existingMovieID, }, nil) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumIgnore - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumCreate - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) - assert.Equal(t, existingMovieID, i.movies[0].MovieID) + assert.Equal(t, existingMovieID, i.scene.Movies.List()[0].MovieID) movieReaderWriter.AssertExpectations(t) } @@ -386,7 +291,6 @@ func TestImporterPreImportWithMissingMovieCreateErr(t *testing.T) { i := Importer{ MovieWriter: movieReaderWriter, - Path: path, Input: jsonschema.Scene{ Movies: []jsonschema.SceneMovie{ { @@ -397,10 +301,10 @@ func TestImporterPreImportWithMissingMovieCreateErr(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumCreate, } - movieReaderWriter.On("FindByName", missingMovieName, false).Return(nil, nil).Once() - movieReaderWriter.On("Create", mock.AnythingOfType("models.Movie")).Return(nil, errors.New("Create error")) + movieReaderWriter.On("FindByName", testCtx, missingMovieName, false).Return(nil, nil).Once() + movieReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Movie")).Return(nil, errors.New("Create error")) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) } @@ -409,7 +313,6 @@ func TestImporterPreImportWithTag(t *testing.T) { i := Importer{ TagWriter: tagReaderWriter, - Path: path, MissingRefBehaviour: models.ImportMissingRefEnumFail, Input: jsonschema.Scene{ Tags: []string{ @@ -418,20 +321,20 @@ func TestImporterPreImportWithTag(t *testing.T) { }, } - tagReaderWriter.On("FindByNames", []string{existingTagName}, false).Return([]*models.Tag{ + tagReaderWriter.On("FindByNames", testCtx, []string{existingTagName}, false).Return([]*models.Tag{ { ID: existingTagID, Name: existingTagName, }, }, nil).Once() - tagReaderWriter.On("FindByNames", []string{existingTagErr}, false).Return(nil, errors.New("FindByNames error")).Once() + tagReaderWriter.On("FindByNames", testCtx, []string{existingTagErr}, false).Return(nil, errors.New("FindByNames error")).Once() - err := i.PreImport() + err := i.PreImport(testCtx) assert.Nil(t, err) - assert.Equal(t, existingTagID, i.tags[0].ID) + assert.Equal(t, []int{existingTagID}, i.scene.TagIDs.List()) i.Input.Tags = []string{existingTagErr} - err = i.PreImport() + err = i.PreImport(testCtx) assert.NotNil(t, err) tagReaderWriter.AssertExpectations(t) @@ -441,7 +344,6 @@ func TestImporterPreImportWithMissingTag(t *testing.T) { tagReaderWriter := &mocks.TagReaderWriter{} i := Importer{ - Path: path, TagWriter: tagReaderWriter, Input: jsonschema.Scene{ Tags: []string{ @@ -451,22 +353,22 @@ func TestImporterPreImportWithMissingTag(t *testing.T) { 
MissingRefBehaviour: models.ImportMissingRefEnumFail, } - tagReaderWriter.On("FindByNames", []string{missingTagName}, false).Return(nil, nil).Times(3) - tagReaderWriter.On("Create", mock.AnythingOfType("models.Tag")).Return(&models.Tag{ + tagReaderWriter.On("FindByNames", testCtx, []string{missingTagName}, false).Return(nil, nil).Times(3) + tagReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Tag")).Return(&models.Tag{ ID: existingTagID, }, nil) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumIgnore - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumCreate - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) - assert.Equal(t, existingTagID, i.tags[0].ID) + assert.Equal(t, []int{existingTagID}, i.scene.TagIDs.List()) tagReaderWriter.AssertExpectations(t) } @@ -476,7 +378,6 @@ func TestImporterPreImportWithMissingTagCreateErr(t *testing.T) { i := Importer{ TagWriter: tagReaderWriter, - Path: path, Input: jsonschema.Scene{ Tags: []string{ missingTagName, @@ -485,271 +386,9 @@ func TestImporterPreImportWithMissingTagCreateErr(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumCreate, } - tagReaderWriter.On("FindByNames", []string{missingTagName}, false).Return(nil, nil).Once() - tagReaderWriter.On("Create", mock.AnythingOfType("models.Tag")).Return(nil, errors.New("Create error")) + tagReaderWriter.On("FindByNames", testCtx, []string{missingTagName}, false).Return(nil, nil).Once() + tagReaderWriter.On("Create", testCtx, mock.AnythingOfType("models.Tag")).Return(nil, errors.New("Create error")) - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) } - -func TestImporterPostImport(t *testing.T) { - readerWriter := &mocks.SceneReaderWriter{} - - i := Importer{ - ReaderWriter: readerWriter, - coverImageData: imageBytes, - } - - updateSceneImageErr := errors.New("UpdateCover error") - - readerWriter.On("UpdateCover", sceneID, imageBytes).Return(nil).Once() - readerWriter.On("UpdateCover", errImageID, imageBytes).Return(updateSceneImageErr).Once() - - err := i.PostImport(sceneID) - assert.Nil(t, err) - - err = i.PostImport(errImageID) - assert.NotNil(t, err) - - readerWriter.AssertExpectations(t) -} - -func TestImporterPostImportUpdateGalleries(t *testing.T) { - sceneReaderWriter := &mocks.SceneReaderWriter{} - - i := Importer{ - ReaderWriter: sceneReaderWriter, - galleries: []*models.Gallery{ - { - ID: existingGalleryID, - }, - }, - } - - updateErr := errors.New("UpdateGalleries error") - - sceneReaderWriter.On("UpdateGalleries", sceneID, []int{existingGalleryID}).Return(nil).Once() - sceneReaderWriter.On("UpdateGalleries", errGalleriesID, mock.AnythingOfType("[]int")).Return(updateErr).Once() - - err := i.PostImport(sceneID) - assert.Nil(t, err) - - err = i.PostImport(errGalleriesID) - assert.NotNil(t, err) - - sceneReaderWriter.AssertExpectations(t) -} - -func TestImporterPostImportUpdatePerformers(t *testing.T) { - sceneReaderWriter := &mocks.SceneReaderWriter{} - - i := Importer{ - ReaderWriter: sceneReaderWriter, - performers: []*models.Performer{ - { - ID: existingPerformerID, - }, - }, - } - - updateErr := errors.New("UpdatePerformers error") - - sceneReaderWriter.On("UpdatePerformers", sceneID, []int{existingPerformerID}).Return(nil).Once() - sceneReaderWriter.On("UpdatePerformers", errPerformersID, mock.AnythingOfType("[]int")).Return(updateErr).Once() - - err := 
i.PostImport(sceneID) - assert.Nil(t, err) - - err = i.PostImport(errPerformersID) - assert.NotNil(t, err) - - sceneReaderWriter.AssertExpectations(t) -} - -func TestImporterPostImportUpdateMovies(t *testing.T) { - sceneReaderWriter := &mocks.SceneReaderWriter{} - - i := Importer{ - ReaderWriter: sceneReaderWriter, - movies: []models.MoviesScenes{ - { - MovieID: existingMovieID, - }, - }, - } - - updateErr := errors.New("UpdateMovies error") - - sceneReaderWriter.On("UpdateMovies", sceneID, []models.MoviesScenes{ - { - MovieID: existingMovieID, - SceneID: sceneID, - }, - }).Return(nil).Once() - sceneReaderWriter.On("UpdateMovies", errMoviesID, mock.AnythingOfType("[]models.MoviesScenes")).Return(updateErr).Once() - - err := i.PostImport(sceneID) - assert.Nil(t, err) - - err = i.PostImport(errMoviesID) - assert.NotNil(t, err) - - sceneReaderWriter.AssertExpectations(t) -} - -func TestImporterPostImportUpdateTags(t *testing.T) { - sceneReaderWriter := &mocks.SceneReaderWriter{} - - i := Importer{ - ReaderWriter: sceneReaderWriter, - tags: []*models.Tag{ - { - ID: existingTagID, - }, - }, - } - - updateErr := errors.New("UpdateTags error") - - sceneReaderWriter.On("UpdateTags", sceneID, []int{existingTagID}).Return(nil).Once() - sceneReaderWriter.On("UpdateTags", errTagsID, mock.AnythingOfType("[]int")).Return(updateErr).Once() - - err := i.PostImport(sceneID) - assert.Nil(t, err) - - err = i.PostImport(errTagsID) - assert.NotNil(t, err) - - sceneReaderWriter.AssertExpectations(t) -} - -func TestImporterFindExistingID(t *testing.T) { - readerWriter := &mocks.SceneReaderWriter{} - - i := Importer{ - ReaderWriter: readerWriter, - Path: path, - Input: jsonschema.Scene{ - Checksum: missingChecksum, - OSHash: missingOSHash, - }, - FileNamingAlgorithm: models.HashAlgorithmMd5, - } - - expectedErr := errors.New("FindBy* error") - readerWriter.On("FindByChecksum", missingChecksum).Return(nil, nil).Once() - readerWriter.On("FindByChecksum", checksum).Return(&models.Scene{ - ID: existingSceneID, - }, nil).Once() - readerWriter.On("FindByChecksum", errChecksum).Return(nil, expectedErr).Once() - - readerWriter.On("FindByOSHash", missingOSHash).Return(nil, nil).Once() - readerWriter.On("FindByOSHash", oshash).Return(&models.Scene{ - ID: existingSceneID, - }, nil).Once() - readerWriter.On("FindByOSHash", errOSHash).Return(nil, expectedErr).Once() - - id, err := i.FindExistingID() - assert.Nil(t, id) - assert.Nil(t, err) - - i.Input.Checksum = checksum - id, err = i.FindExistingID() - assert.Equal(t, existingSceneID, *id) - assert.Nil(t, err) - - i.Input.Checksum = errChecksum - id, err = i.FindExistingID() - assert.Nil(t, id) - assert.NotNil(t, err) - - i.FileNamingAlgorithm = models.HashAlgorithmOshash - id, err = i.FindExistingID() - assert.Nil(t, id) - assert.Nil(t, err) - - i.Input.OSHash = oshash - id, err = i.FindExistingID() - assert.Equal(t, existingSceneID, *id) - assert.Nil(t, err) - - i.Input.OSHash = errOSHash - id, err = i.FindExistingID() - assert.Nil(t, id) - assert.NotNil(t, err) - - readerWriter.AssertExpectations(t) -} - -func TestCreate(t *testing.T) { - readerWriter := &mocks.SceneReaderWriter{} - - scene := models.Scene{ - Title: models.NullString(title), - } - - sceneErr := models.Scene{ - Title: models.NullString(sceneNameErr), - } - - i := Importer{ - ReaderWriter: readerWriter, - scene: scene, - } - - errCreate := errors.New("Create error") - readerWriter.On("Create", scene).Return(&models.Scene{ - ID: sceneID, - }, nil).Once() - readerWriter.On("Create", sceneErr).Return(nil, 
errCreate).Once() - - id, err := i.Create() - assert.Equal(t, sceneID, *id) - assert.Nil(t, err) - assert.Equal(t, sceneID, i.ID) - - i.scene = sceneErr - id, err = i.Create() - assert.Nil(t, id) - assert.NotNil(t, err) - - readerWriter.AssertExpectations(t) -} - -func TestUpdate(t *testing.T) { - readerWriter := &mocks.SceneReaderWriter{} - - scene := models.Scene{ - Title: models.NullString(title), - } - - sceneErr := models.Scene{ - Title: models.NullString(sceneNameErr), - } - - i := Importer{ - ReaderWriter: readerWriter, - scene: scene, - } - - errUpdate := errors.New("Update error") - - // id needs to be set for the mock input - scene.ID = sceneID - readerWriter.On("UpdateFull", scene).Return(nil, nil).Once() - - err := i.Update(sceneID) - assert.Nil(t, err) - assert.Equal(t, sceneID, i.ID) - - i.scene = sceneErr - - // need to set id separately - sceneErr.ID = errImageID - readerWriter.On("UpdateFull", sceneErr).Return(nil, errUpdate).Once() - - err = i.Update(errImageID) - assert.NotNil(t, err) - - readerWriter.AssertExpectations(t) -} diff --git a/pkg/scene/marker_import.go b/pkg/scene/marker_import.go index 530d025ea..32f6deb65 100644 --- a/pkg/scene/marker_import.go +++ b/pkg/scene/marker_import.go @@ -1,18 +1,27 @@ package scene import ( + "context" "database/sql" "fmt" "strconv" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models/jsonschema" + "github.com/stashapp/stash/pkg/tag" ) +type MarkerCreatorUpdater interface { + Create(ctx context.Context, newSceneMarker models.SceneMarker) (*models.SceneMarker, error) + Update(ctx context.Context, updatedSceneMarker models.SceneMarker) (*models.SceneMarker, error) + FindBySceneID(ctx context.Context, sceneID int) ([]*models.SceneMarker, error) + UpdateTags(ctx context.Context, markerID int, tagIDs []int) error +} + type MarkerImporter struct { SceneID int - ReaderWriter models.SceneMarkerReaderWriter - TagWriter models.TagReaderWriter + ReaderWriter MarkerCreatorUpdater + TagWriter tag.NameFinderCreator Input jsonschema.SceneMarker MissingRefBehaviour models.ImportMissingRefEnum @@ -20,7 +29,7 @@ type MarkerImporter struct { marker models.SceneMarker } -func (i *MarkerImporter) PreImport() error { +func (i *MarkerImporter) PreImport(ctx context.Context) error { seconds, _ := strconv.ParseFloat(i.Input.Seconds, 64) i.marker = models.SceneMarker{ Title: i.Input.Title, @@ -30,21 +39,21 @@ func (i *MarkerImporter) PreImport() error { UpdatedAt: models.SQLiteTimestamp{Timestamp: i.Input.UpdatedAt.GetTime()}, } - if err := i.populateTags(); err != nil { + if err := i.populateTags(ctx); err != nil { return err } return nil } -func (i *MarkerImporter) populateTags() error { +func (i *MarkerImporter) populateTags(ctx context.Context) error { // primary tag cannot be ignored mrb := i.MissingRefBehaviour if mrb == models.ImportMissingRefEnumIgnore { mrb = models.ImportMissingRefEnumFail } - primaryTag, err := importTags(i.TagWriter, []string{i.Input.PrimaryTag}, mrb) + primaryTag, err := importTags(ctx, i.TagWriter, []string{i.Input.PrimaryTag}, mrb) if err != nil { return err } @@ -52,7 +61,7 @@ func (i *MarkerImporter) populateTags() error { i.marker.PrimaryTagID = primaryTag[0].ID if len(i.Input.Tags) > 0 { - tags, err := importTags(i.TagWriter, i.Input.Tags, i.MissingRefBehaviour) + tags, err := importTags(ctx, i.TagWriter, i.Input.Tags, i.MissingRefBehaviour) if err != nil { return err } @@ -63,13 +72,13 @@ func (i *MarkerImporter) populateTags() error { return nil } -func (i *MarkerImporter) PostImport(id int) 
error { +func (i *MarkerImporter) PostImport(ctx context.Context, id int) error { if len(i.tags) > 0 { var tagIDs []int for _, t := range i.tags { tagIDs = append(tagIDs, t.ID) } - if err := i.ReaderWriter.UpdateTags(id, tagIDs); err != nil { + if err := i.ReaderWriter.UpdateTags(ctx, id, tagIDs); err != nil { return fmt.Errorf("failed to associate tags: %v", err) } } @@ -81,8 +90,8 @@ func (i *MarkerImporter) Name() string { return fmt.Sprintf("%s (%s)", i.Input.Title, i.Input.Seconds) } -func (i *MarkerImporter) FindExistingID() (*int, error) { - existingMarkers, err := i.ReaderWriter.FindBySceneID(i.SceneID) +func (i *MarkerImporter) FindExistingID(ctx context.Context) (*int, error) { + existingMarkers, err := i.ReaderWriter.FindBySceneID(ctx, i.SceneID) if err != nil { return nil, err @@ -98,8 +107,8 @@ func (i *MarkerImporter) FindExistingID() (*int, error) { return nil, nil } -func (i *MarkerImporter) Create() (*int, error) { - created, err := i.ReaderWriter.Create(i.marker) +func (i *MarkerImporter) Create(ctx context.Context) (*int, error) { + created, err := i.ReaderWriter.Create(ctx, i.marker) if err != nil { return nil, fmt.Errorf("error creating marker: %v", err) } @@ -108,10 +117,10 @@ func (i *MarkerImporter) Create() (*int, error) { return &id, nil } -func (i *MarkerImporter) Update(id int) error { +func (i *MarkerImporter) Update(ctx context.Context, id int) error { marker := i.marker marker.ID = id - _, err := i.ReaderWriter.Update(marker) + _, err := i.ReaderWriter.Update(ctx, marker) if err != nil { return fmt.Errorf("error updating existing marker: %v", err) } diff --git a/pkg/scene/marker_import_test.go b/pkg/scene/marker_import_test.go index 0aa72a08b..86fba3f8c 100644 --- a/pkg/scene/marker_import_test.go +++ b/pkg/scene/marker_import_test.go @@ -1,205 +1,211 @@ package scene -import ( - "errors" - "testing" +// import ( +// "context" +// "errors" +// "testing" - "github.com/stashapp/stash/pkg/models" - "github.com/stashapp/stash/pkg/models/jsonschema" - "github.com/stashapp/stash/pkg/models/mocks" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" -) +// "github.com/stashapp/stash/pkg/models" +// "github.com/stashapp/stash/pkg/models/jsonschema" +// "github.com/stashapp/stash/pkg/models/mocks" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/mock" +// ) -const ( - seconds = "5" - secondsFloat = 5.0 - errSceneID = 999 -) +// const ( +// seconds = "5" +// secondsFloat = 5.0 +// errSceneID = 999 +// ) -func TestMarkerImporterName(t *testing.T) { - i := MarkerImporter{ - Input: jsonschema.SceneMarker{ - Title: title, - Seconds: seconds, - }, - } +// func TestMarkerImporterName(t *testing.T) { +// i := MarkerImporter{ +// Input: jsonschema.SceneMarker{ +// Title: title, +// Seconds: seconds, +// }, +// } - assert.Equal(t, title+" (5)", i.Name()) -} +// assert.Equal(t, title+" (5)", i.Name()) +// } -func TestMarkerImporterPreImportWithTag(t *testing.T) { - tagReaderWriter := &mocks.TagReaderWriter{} +// func TestMarkerImporterPreImportWithTag(t *testing.T) { +// tagReaderWriter := &mocks.TagReaderWriter{} +// ctx := context.Background() - i := MarkerImporter{ - TagWriter: tagReaderWriter, - MissingRefBehaviour: models.ImportMissingRefEnumFail, - Input: jsonschema.SceneMarker{ - PrimaryTag: existingTagName, - }, - } +// i := MarkerImporter{ +// TagWriter: tagReaderWriter, +// MissingRefBehaviour: models.ImportMissingRefEnumFail, +// Input: jsonschema.SceneMarker{ +// PrimaryTag: existingTagName, +// }, +// } - 
tagReaderWriter.On("FindByNames", []string{existingTagName}, false).Return([]*models.Tag{ - { - ID: existingTagID, - Name: existingTagName, - }, - }, nil).Times(4) - tagReaderWriter.On("FindByNames", []string{existingTagErr}, false).Return(nil, errors.New("FindByNames error")).Times(2) +// tagReaderWriter.On("FindByNames", ctx, []string{existingTagName}, false).Return([]*models.Tag{ +// { +// ID: existingTagID, +// Name: existingTagName, +// }, +// }, nil).Times(4) +// tagReaderWriter.On("FindByNames", ctx, []string{existingTagErr}, false).Return(nil, errors.New("FindByNames error")).Times(2) - err := i.PreImport() - assert.Nil(t, err) - assert.Equal(t, existingTagID, i.marker.PrimaryTagID) +// err := i.PreImport(ctx) +// assert.Nil(t, err) +// assert.Equal(t, existingTagID, i.marker.PrimaryTagID) - i.Input.PrimaryTag = existingTagErr - err = i.PreImport() - assert.NotNil(t, err) +// i.Input.PrimaryTag = existingTagErr +// err = i.PreImport(ctx) +// assert.NotNil(t, err) - i.Input.PrimaryTag = existingTagName - i.Input.Tags = []string{ - existingTagName, - } - err = i.PreImport() - assert.Nil(t, err) - assert.Equal(t, existingTagID, i.tags[0].ID) +// i.Input.PrimaryTag = existingTagName +// i.Input.Tags = []string{ +// existingTagName, +// } +// err = i.PreImport(ctx) +// assert.Nil(t, err) +// assert.Equal(t, existingTagID, i.tags[0].ID) - i.Input.Tags[0] = existingTagErr - err = i.PreImport() - assert.NotNil(t, err) +// i.Input.Tags[0] = existingTagErr +// err = i.PreImport(ctx) +// assert.NotNil(t, err) - tagReaderWriter.AssertExpectations(t) -} +// tagReaderWriter.AssertExpectations(t) +// } -func TestMarkerImporterPostImportUpdateTags(t *testing.T) { - sceneMarkerReaderWriter := &mocks.SceneMarkerReaderWriter{} +// func TestMarkerImporterPostImportUpdateTags(t *testing.T) { +// sceneMarkerReaderWriter := &mocks.SceneMarkerReaderWriter{} +// ctx := context.Background() - i := MarkerImporter{ - ReaderWriter: sceneMarkerReaderWriter, - tags: []*models.Tag{ - { - ID: existingTagID, - }, - }, - } +// i := MarkerImporter{ +// ReaderWriter: sceneMarkerReaderWriter, +// tags: []*models.Tag{ +// { +// ID: existingTagID, +// }, +// }, +// } - updateErr := errors.New("UpdateTags error") +// updateErr := errors.New("UpdateTags error") - sceneMarkerReaderWriter.On("UpdateTags", sceneID, []int{existingTagID}).Return(nil).Once() - sceneMarkerReaderWriter.On("UpdateTags", errTagsID, mock.AnythingOfType("[]int")).Return(updateErr).Once() +// sceneMarkerReaderWriter.On("UpdateTags", ctx, sceneID, []int{existingTagID}).Return(nil).Once() +// sceneMarkerReaderWriter.On("UpdateTags", ctx, errTagsID, mock.AnythingOfType("[]int")).Return(updateErr).Once() - err := i.PostImport(sceneID) - assert.Nil(t, err) +// err := i.PostImport(ctx, sceneID) +// assert.Nil(t, err) - err = i.PostImport(errTagsID) - assert.NotNil(t, err) +// err = i.PostImport(ctx, errTagsID) +// assert.NotNil(t, err) - sceneMarkerReaderWriter.AssertExpectations(t) -} +// sceneMarkerReaderWriter.AssertExpectations(t) +// } -func TestMarkerImporterFindExistingID(t *testing.T) { - readerWriter := &mocks.SceneMarkerReaderWriter{} +// func TestMarkerImporterFindExistingID(t *testing.T) { +// readerWriter := &mocks.SceneMarkerReaderWriter{} +// ctx := context.Background() - i := MarkerImporter{ - ReaderWriter: readerWriter, - SceneID: sceneID, - marker: models.SceneMarker{ - Seconds: secondsFloat, - }, - } +// i := MarkerImporter{ +// ReaderWriter: readerWriter, +// SceneID: sceneID, +// marker: models.SceneMarker{ +// Seconds: secondsFloat, +// 
}, +// } - expectedErr := errors.New("FindBy* error") - readerWriter.On("FindBySceneID", sceneID).Return([]*models.SceneMarker{ - { - ID: existingSceneID, - Seconds: secondsFloat, - }, - }, nil).Times(2) - readerWriter.On("FindBySceneID", errSceneID).Return(nil, expectedErr).Once() +// expectedErr := errors.New("FindBy* error") +// readerWriter.On("FindBySceneID", ctx, sceneID).Return([]*models.SceneMarker{ +// { +// ID: existingSceneID, +// Seconds: secondsFloat, +// }, +// }, nil).Times(2) +// readerWriter.On("FindBySceneID", ctx, errSceneID).Return(nil, expectedErr).Once() - id, err := i.FindExistingID() - assert.Equal(t, existingSceneID, *id) - assert.Nil(t, err) +// id, err := i.FindExistingID(ctx) +// assert.Equal(t, existingSceneID, *id) +// assert.Nil(t, err) - i.marker.Seconds++ - id, err = i.FindExistingID() - assert.Nil(t, id) - assert.Nil(t, err) +// i.marker.Seconds++ +// id, err = i.FindExistingID(ctx) +// assert.Nil(t, id) +// assert.Nil(t, err) - i.SceneID = errSceneID - id, err = i.FindExistingID() - assert.Nil(t, id) - assert.NotNil(t, err) +// i.SceneID = errSceneID +// id, err = i.FindExistingID(ctx) +// assert.Nil(t, id) +// assert.NotNil(t, err) - readerWriter.AssertExpectations(t) -} +// readerWriter.AssertExpectations(t) +// } -func TestMarkerImporterCreate(t *testing.T) { - readerWriter := &mocks.SceneMarkerReaderWriter{} +// func TestMarkerImporterCreate(t *testing.T) { +// readerWriter := &mocks.SceneMarkerReaderWriter{} +// ctx := context.Background() - scene := models.SceneMarker{ - Title: title, - } +// scene := models.SceneMarker{ +// Title: title, +// } - sceneErr := models.SceneMarker{ - Title: sceneNameErr, - } +// sceneErr := models.SceneMarker{ +// Title: sceneNameErr, +// } - i := MarkerImporter{ - ReaderWriter: readerWriter, - marker: scene, - } +// i := MarkerImporter{ +// ReaderWriter: readerWriter, +// marker: scene, +// } - errCreate := errors.New("Create error") - readerWriter.On("Create", scene).Return(&models.SceneMarker{ - ID: sceneID, - }, nil).Once() - readerWriter.On("Create", sceneErr).Return(nil, errCreate).Once() +// errCreate := errors.New("Create error") +// readerWriter.On("Create", ctx, scene).Return(&models.SceneMarker{ +// ID: sceneID, +// }, nil).Once() +// readerWriter.On("Create", ctx, sceneErr).Return(nil, errCreate).Once() - id, err := i.Create() - assert.Equal(t, sceneID, *id) - assert.Nil(t, err) +// id, err := i.Create(ctx) +// assert.Equal(t, sceneID, *id) +// assert.Nil(t, err) - i.marker = sceneErr - id, err = i.Create() - assert.Nil(t, id) - assert.NotNil(t, err) +// i.marker = sceneErr +// id, err = i.Create(ctx) +// assert.Nil(t, id) +// assert.NotNil(t, err) - readerWriter.AssertExpectations(t) -} +// readerWriter.AssertExpectations(t) +// } -func TestMarkerImporterUpdate(t *testing.T) { - readerWriter := &mocks.SceneMarkerReaderWriter{} +// func TestMarkerImporterUpdate(t *testing.T) { +// readerWriter := &mocks.SceneMarkerReaderWriter{} +// ctx := context.Background() - scene := models.SceneMarker{ - Title: title, - } +// scene := models.SceneMarker{ +// Title: title, +// } - sceneErr := models.SceneMarker{ - Title: sceneNameErr, - } +// sceneErr := models.SceneMarker{ +// Title: sceneNameErr, +// } - i := MarkerImporter{ - ReaderWriter: readerWriter, - marker: scene, - } +// i := MarkerImporter{ +// ReaderWriter: readerWriter, +// marker: scene, +// } - errUpdate := errors.New("Update error") +// errUpdate := errors.New("Update error") - // id needs to be set for the mock input - scene.ID = sceneID - 
readerWriter.On("Update", scene).Return(nil, nil).Once() +// // id needs to be set for the mock input +// scene.ID = sceneID +// readerWriter.On("Update", ctx, scene).Return(nil, nil).Once() - err := i.Update(sceneID) - assert.Nil(t, err) +// err := i.Update(ctx, sceneID) +// assert.Nil(t, err) - i.marker = sceneErr +// i.marker = sceneErr - // need to set id separately - sceneErr.ID = errImageID - readerWriter.On("Update", sceneErr).Return(nil, errUpdate).Once() +// // need to set id separately +// sceneErr.ID = errImageID +// readerWriter.On("Update", ctx, sceneErr).Return(nil, errUpdate).Once() - err = i.Update(errImageID) - assert.NotNil(t, err) +// err = i.Update(ctx, errImageID) +// assert.NotNil(t, err) - readerWriter.AssertExpectations(t) -} +// readerWriter.AssertExpectations(t) +// } diff --git a/pkg/scene/query.go b/pkg/scene/query.go index f560430c3..928270f38 100644 --- a/pkg/scene/query.go +++ b/pkg/scene/query.go @@ -11,7 +11,11 @@ import ( ) type Queryer interface { - Query(options models.SceneQueryOptions) (*models.SceneQueryResult, error) + Query(ctx context.Context, options models.SceneQueryOptions) (*models.SceneQueryResult, error) +} + +type IDFinder interface { + Find(ctx context.Context, id int) (*models.Scene, error) } // QueryOptions returns a SceneQueryOptions populated with the provided filters. @@ -26,15 +30,15 @@ func QueryOptions(sceneFilter *models.SceneFilterType, findFilter *models.FindFi } // QueryWithCount queries for scenes, returning the scene objects and the total count. -func QueryWithCount(qb Queryer, sceneFilter *models.SceneFilterType, findFilter *models.FindFilterType) ([]*models.Scene, int, error) { +func QueryWithCount(ctx context.Context, qb Queryer, sceneFilter *models.SceneFilterType, findFilter *models.FindFilterType) ([]*models.Scene, int, error) { // this was moved from the queryBuilder code // left here so that calling functions can reference this instead - result, err := qb.Query(QueryOptions(sceneFilter, findFilter, true)) + result, err := qb.Query(ctx, QueryOptions(sceneFilter, findFilter, true)) if err != nil { return nil, 0, err } - scenes, err := result.Resolve() + scenes, err := result.Resolve(ctx) if err != nil { return nil, 0, err } @@ -43,13 +47,13 @@ func QueryWithCount(qb Queryer, sceneFilter *models.SceneFilterType, findFilter } // Query queries for scenes using the provided filters. 
-func Query(qb Queryer, sceneFilter *models.SceneFilterType, findFilter *models.FindFilterType) ([]*models.Scene, error) { - result, err := qb.Query(QueryOptions(sceneFilter, findFilter, false)) +func Query(ctx context.Context, qb Queryer, sceneFilter *models.SceneFilterType, findFilter *models.FindFilterType) ([]*models.Scene, error) { + result, err := qb.Query(ctx, QueryOptions(sceneFilter, findFilter, false)) if err != nil { return nil, err } - scenes, err := result.Resolve() + scenes, err := result.Resolve(ctx) if err != nil { return nil, err } @@ -57,7 +61,7 @@ func Query(qb Queryer, sceneFilter *models.SceneFilterType, findFilter *models.F return scenes, nil } -func BatchProcess(ctx context.Context, reader models.SceneReader, sceneFilter *models.SceneFilterType, findFilter *models.FindFilterType, fn func(scene *models.Scene) error) error { +func BatchProcess(ctx context.Context, reader Queryer, sceneFilter *models.SceneFilterType, findFilter *models.FindFilterType, fn func(scene *models.Scene) error) error { const batchSize = 1000 if findFilter == nil { @@ -74,7 +78,7 @@ func BatchProcess(ctx context.Context, reader models.SceneReader, sceneFilter *m return nil } - scenes, err := Query(reader, sceneFilter, findFilter) + scenes, err := Query(ctx, reader, sceneFilter, findFilter) if err != nil { return fmt.Errorf("error querying for scenes: %w", err) } diff --git a/pkg/scene/scan.go b/pkg/scene/scan.go index 1f33fa9ff..cf9b0d6fc 100644 --- a/pkg/scene/scan.go +++ b/pkg/scene/scan.go @@ -2,378 +2,139 @@ package scene import ( "context" - "database/sql" + "errors" "fmt" - "os" - "path/filepath" - "strconv" - "strings" "time" - "github.com/stashapp/stash/pkg/ffmpeg" "github.com/stashapp/stash/pkg/file" - "github.com/stashapp/stash/pkg/fsutil" "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" - "github.com/stashapp/stash/pkg/models/paths" "github.com/stashapp/stash/pkg/plugin" - "github.com/stashapp/stash/pkg/utils" ) -const mutexType = "scene" +var ( + ErrNotVideoFile = errors.New("not a video file") +) -type videoFileCreator interface { - NewVideoFile(path string) (*ffmpeg.VideoFile, error) +type CreatorUpdater interface { + FindByFileID(ctx context.Context, fileID file.ID) ([]*models.Scene, error) + FindByFingerprints(ctx context.Context, fp []file.Fingerprint) ([]*models.Scene, error) + Create(ctx context.Context, newScene *models.Scene, fileIDs []file.ID) error + UpdatePartial(ctx context.Context, id int, updatedScene models.ScenePartial) (*models.Scene, error) + AddFileID(ctx context.Context, id int, fileID file.ID) error + models.VideoFileLoader } -type Scanner struct { - file.Scanner - - StripFileExtension bool - UseFileMetadata bool - FileNamingAlgorithm models.HashAlgorithm - - CaseSensitiveFs bool - TxnManager models.TransactionManager - Paths *paths.Paths - Screenshotter screenshotter - VideoFileCreator videoFileCreator - PluginCache *plugin.Cache - MutexManager *utils.MutexManager +type ScanGenerator interface { + Generate(ctx context.Context, s *models.Scene, f *file.VideoFile) error } -func FileScanner(hasher file.Hasher, fileNamingAlgorithm models.HashAlgorithm, calculateMD5 bool) file.Scanner { - return file.Scanner{ - Hasher: hasher, - CalculateOSHash: true, - CalculateMD5: fileNamingAlgorithm == models.HashAlgorithmMd5 || calculateMD5, - } +type ScanHandler struct { + CreatorUpdater CreatorUpdater + + CoverGenerator CoverGenerator + ScanGenerator ScanGenerator + PluginCache *plugin.Cache } -func (scanner *Scanner) ScanExisting(ctx 
context.Context, existing file.FileBased, file file.SourceFile) (err error) { - scanned, err := scanner.Scanner.ScanExisting(existing, file) - if err != nil { - return err +func (h *ScanHandler) validate() error { + if h.CreatorUpdater == nil { + return errors.New("CreatorUpdater is required") } - - s := existing.(*models.Scene) - - path := scanned.New.Path - interactive := getInteractive(path) - - oldHash := s.GetHash(scanner.FileNamingAlgorithm) - changed := false - - var videoFile *ffmpeg.VideoFile - - if scanned.ContentsChanged() { - logger.Infof("%s has been updated: rescanning", path) - - s.SetFile(*scanned.New) - - videoFile, err = scanner.VideoFileCreator.NewVideoFile(path) - if err != nil { - return err - } - - if err := videoFileToScene(s, videoFile); err != nil { - return err - } - changed = true - } else if scanned.FileUpdated() || s.Interactive != interactive { - logger.Infof("Updated scene file %s", path) - - // update fields as needed - s.SetFile(*scanned.New) - changed = true + if h.CoverGenerator == nil { + return errors.New("CoverGenerator is required") } - - // check for container - if !s.Format.Valid { - if videoFile == nil { - videoFile, err = scanner.VideoFileCreator.NewVideoFile(path) - if err != nil { - return err - } - } - container, err := ffmpeg.MatchContainer(videoFile.Container, path) - if err != nil { - return fmt.Errorf("getting container for %s: %w", path, err) - } - logger.Infof("Adding container %s to file %s", container, path) - s.Format = models.NullString(string(container)) - changed = true + if h.ScanGenerator == nil { + return errors.New("ScanGenerator is required") } - if err := scanner.TxnManager.WithTxn(context.TODO(), func(r models.Repository) error { - var err error - sqb := r.Scene() - - captions, er := sqb.GetCaptions(s.ID) - if er == nil { - if len(captions) > 0 { - clean, altered := CleanCaptions(s.Path, captions) - if altered { - er = sqb.UpdateCaptions(s.ID, clean) - if er == nil { - logger.Debugf("Captions for %s cleaned: %s -> %s", path, captions, clean) - } - } - } - } - return err - }); err != nil { - logger.Error(err.Error()) - } - - if changed { - // we are operating on a checksum now, so grab a mutex on the checksum - done := make(chan struct{}) - if scanned.New.OSHash != "" { - scanner.MutexManager.Claim(mutexType, scanned.New.OSHash, done) - } - if scanned.New.Checksum != "" { - scanner.MutexManager.Claim(mutexType, scanned.New.Checksum, done) - } - - if err := scanner.TxnManager.WithTxn(ctx, func(r models.Repository) error { - defer close(done) - qb := r.Scene() - - // ensure no clashes of hashes - if scanned.New.Checksum != "" && scanned.Old.Checksum != scanned.New.Checksum { - dupe, _ := qb.FindByChecksum(s.Checksum.String) - if dupe != nil { - return fmt.Errorf("MD5 for file %s is the same as that of %s", path, dupe.Path) - } - } - - if scanned.New.OSHash != "" && scanned.Old.OSHash != scanned.New.OSHash { - dupe, _ := qb.FindByOSHash(scanned.New.OSHash) - if dupe != nil { - return fmt.Errorf("OSHash for file %s is the same as that of %s", path, dupe.Path) - } - } - - s.Interactive = interactive - s.UpdatedAt = models.SQLiteTimestamp{Timestamp: time.Now()} - - _, err := qb.UpdateFull(*s) - return err - }); err != nil { - return err - } - - // Migrate any generated files if the hash has changed - newHash := s.GetHash(scanner.FileNamingAlgorithm) - if newHash != oldHash { - MigrateHash(scanner.Paths, oldHash, newHash) - } - - scanner.PluginCache.ExecutePostHooks(ctx, s.ID, plugin.SceneUpdatePost, nil, nil) - } - - // We already 
have this item in the database - // check for thumbnails, screenshots - scanner.makeScreenshots(path, videoFile, s.GetHash(scanner.FileNamingAlgorithm)) - return nil } -func (scanner *Scanner) ScanNew(ctx context.Context, file file.SourceFile) (retScene *models.Scene, err error) { - scanned, err := scanner.Scanner.ScanNew(file) +func (h *ScanHandler) Handle(ctx context.Context, f file.File) error { + if err := h.validate(); err != nil { + return err + } + + videoFile, ok := f.(*file.VideoFile) + if !ok { + return ErrNotVideoFile + } + + // try to match the file to a scene + existing, err := h.CreatorUpdater.FindByFileID(ctx, f.Base().ID) if err != nil { - return nil, err + return fmt.Errorf("finding existing scene: %w", err) } - path := file.Path() - checksum := scanned.Checksum - oshash := scanned.OSHash - - // grab a mutex on the checksum and oshash - done := make(chan struct{}) - if oshash != "" { - scanner.MutexManager.Claim(mutexType, oshash, done) - } - if checksum != "" { - scanner.MutexManager.Claim(mutexType, checksum, done) - } - - defer close(done) - - // check for scene by checksum and oshash - MD5 should be - // redundant, but check both - var s *models.Scene - if err := scanner.TxnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - qb := r.Scene() - if checksum != "" { - s, _ = qb.FindByChecksum(checksum) + if len(existing) == 0 { + // try also to match file by fingerprints + existing, err = h.CreatorUpdater.FindByFingerprints(ctx, videoFile.Fingerprints) + if err != nil { + return fmt.Errorf("finding existing scene by fingerprints: %w", err) } - - if s == nil { - s, _ = qb.FindByOSHash(oshash) - } - - return nil - }); err != nil { - return nil, err } - sceneHash := oshash - - if scanner.FileNamingAlgorithm == models.HashAlgorithmMd5 { - sceneHash = checksum - } - - interactive := getInteractive(file.Path()) - - if s != nil { - exists, _ := fsutil.FileExists(s.Path) - if !scanner.CaseSensitiveFs { - // #1426 - if file exists but is a case-insensitive match for the - // original filename, then treat it as a move - if exists && strings.EqualFold(path, s.Path) { - exists = false - } - } - - if exists { - logger.Infof("%s already exists. Duplicate of %s", path, s.Path) - } else { - logger.Infof("%s already exists. Updating path...", path) - scenePartial := models.ScenePartial{ - ID: s.ID, - Path: &path, - Interactive: &interactive, - } - if err := scanner.TxnManager.WithTxn(ctx, func(r models.Repository) error { - _, err := r.Scene().Update(scenePartial) - return err - }); err != nil { - return nil, err - } - - scanner.makeScreenshots(path, nil, sceneHash) - scanner.PluginCache.ExecutePostHooks(ctx, s.ID, plugin.SceneUpdatePost, nil, nil) + if len(existing) > 0 { + if err := h.associateExisting(ctx, existing, videoFile); err != nil { + return err } } else { - logger.Infof("%s doesn't exist. Creating new item...", path) - currentTime := time.Now() - - videoFile, err := scanner.VideoFileCreator.NewVideoFile(path) - if err != nil { - return nil, err + // create a new scene + now := time.Now() + newScene := &models.Scene{ + CreatedAt: now, + UpdatedAt: now, } - title := filepath.Base(path) - if scanner.StripFileExtension { - title = stripExtension(title) + logger.Infof("%s doesn't exist. 
Creating new scene...", f.Base().Path) + + if err := h.CreatorUpdater.Create(ctx, newScene, []file.ID{videoFile.ID}); err != nil { + return fmt.Errorf("creating new scene: %w", err) } - if scanner.UseFileMetadata && videoFile.Title != "" { - title = videoFile.Title - } + h.PluginCache.ExecutePostHooks(ctx, newScene.ID, plugin.SceneCreatePost, nil, nil) - newScene := models.Scene{ - Checksum: sql.NullString{String: checksum, Valid: checksum != ""}, - OSHash: sql.NullString{String: oshash, Valid: oshash != ""}, - Path: path, - FileModTime: models.NullSQLiteTimestamp{ - Timestamp: scanned.FileModTime, - Valid: true, - }, - Title: sql.NullString{String: title, Valid: true}, - CreatedAt: models.SQLiteTimestamp{Timestamp: currentTime}, - UpdatedAt: models.SQLiteTimestamp{Timestamp: currentTime}, - Interactive: interactive, - } - - if err := videoFileToScene(&newScene, videoFile); err != nil { - return nil, err - } - - if scanner.UseFileMetadata { - newScene.Details = sql.NullString{String: videoFile.Comment, Valid: true} - _ = newScene.Date.Scan(videoFile.CreationTime) - } - - if err := scanner.TxnManager.WithTxn(ctx, func(r models.Repository) error { - var err error - retScene, err = r.Scene().Create(newScene) - return err - }); err != nil { - return nil, err - } - - scanner.makeScreenshots(path, videoFile, sceneHash) - scanner.PluginCache.ExecutePostHooks(ctx, retScene.ID, plugin.SceneCreatePost, nil, nil) + existing = []*models.Scene{newScene} } - return retScene, nil -} + for _, s := range existing { + if err := h.CoverGenerator.GenerateCover(ctx, s, videoFile); err != nil { + // just log if cover generation fails. We can try again on rescan + logger.Errorf("Error generating cover for %s: %v", videoFile.Path, err) + } -func stripExtension(path string) string { - ext := filepath.Ext(path) - return strings.TrimSuffix(path, ext) -} -func videoFileToScene(s *models.Scene, videoFile *ffmpeg.VideoFile) error { - container, err := ffmpeg.MatchContainer(videoFile.Container, s.Path) - if err != nil { - return fmt.Errorf("matching container: %w", err) + if err := h.ScanGenerator.Generate(ctx, s, videoFile); err != nil { + // just log if scan generation fails. 
We can try again on rescan + logger.Errorf("Error generating content for %s: %v", videoFile.Path, err) + } } - s.Duration = sql.NullFloat64{Float64: videoFile.Duration, Valid: true} - s.VideoCodec = sql.NullString{String: videoFile.VideoCodec, Valid: true} - s.AudioCodec = sql.NullString{String: videoFile.AudioCodec, Valid: true} - s.Format = sql.NullString{String: string(container), Valid: true} - s.Width = sql.NullInt64{Int64: int64(videoFile.Width), Valid: true} - s.Height = sql.NullInt64{Int64: int64(videoFile.Height), Valid: true} - s.Framerate = sql.NullFloat64{Float64: videoFile.FrameRate, Valid: true} - s.Bitrate = sql.NullInt64{Int64: videoFile.Bitrate, Valid: true} - s.Size = sql.NullString{String: strconv.FormatInt(videoFile.Size, 10), Valid: true} - return nil } -func (scanner *Scanner) makeScreenshots(path string, probeResult *ffmpeg.VideoFile, checksum string) { - thumbPath := scanner.Paths.Scene.GetThumbnailScreenshotPath(checksum) - normalPath := scanner.Paths.Scene.GetScreenshotPath(checksum) - - thumbExists, _ := fsutil.FileExists(thumbPath) - normalExists, _ := fsutil.FileExists(normalPath) - - if thumbExists && normalExists { - return - } - - if probeResult == nil { - var err error - probeResult, err = scanner.VideoFileCreator.NewVideoFile(path) - - if err != nil { - logger.Error(err.Error()) - return +func (h *ScanHandler) associateExisting(ctx context.Context, existing []*models.Scene, f *file.VideoFile) error { + for _, s := range existing { + if err := s.LoadFiles(ctx, h.CreatorUpdater); err != nil { + return err } - logger.Infof("Regenerating images for %s", path) - } - if !thumbExists { - logger.Debugf("Creating thumbnail for %s", path) - if err := scanner.Screenshotter.GenerateThumbnail(context.TODO(), probeResult, checksum); err != nil { - logger.Errorf("Error creating thumbnail for %s: %v", err) + found := false + for _, sf := range s.Files.List() { + if sf.ID == f.ID { + found = true + break + } + } + + if !found { + logger.Infof("Adding %s to scene %s", f.Path, s.DisplayName()) + + if err := h.CreatorUpdater.AddFileID(ctx, s.ID, f.ID); err != nil { + return fmt.Errorf("adding file to scene: %w", err) + } } } - if !normalExists { - logger.Debugf("Creating screenshot for %s", path) - if err := scanner.Screenshotter.GenerateScreenshot(context.TODO(), probeResult, checksum); err != nil { - logger.Errorf("Error creating screenshot for %s: %v", err) - } - } -} - -func getInteractive(path string) bool { - _, err := os.Stat(GetFunscriptPath(path)) - return err == nil + return nil } diff --git a/pkg/scene/screenshot.go b/pkg/scene/screenshot.go index 36f301b51..13464e16e 100644 --- a/pkg/scene/screenshot.go +++ b/pkg/scene/screenshot.go @@ -7,7 +7,7 @@ import ( "image/jpeg" "os" - "github.com/stashapp/stash/pkg/ffmpeg" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models/paths" @@ -18,21 +18,20 @@ import ( _ "image/png" ) -type screenshotter interface { - GenerateScreenshot(ctx context.Context, probeResult *ffmpeg.VideoFile, hash string) error - GenerateThumbnail(ctx context.Context, probeResult *ffmpeg.VideoFile, hash string) error +type CoverGenerator interface { + GenerateCover(ctx context.Context, scene *models.Scene, f *file.VideoFile) error } type ScreenshotSetter interface { SetScreenshot(scene *models.Scene, imageData []byte) error } -type PathsScreenshotSetter struct { +type PathsCoverSetter struct { Paths *paths.Paths FileNamingAlgorithm models.HashAlgorithm } -func (ss *PathsScreenshotSetter) 
SetScreenshot(scene *models.Scene, imageData []byte) error { +func (ss *PathsCoverSetter) SetScreenshot(scene *models.Scene, imageData []byte) error { checksum := scene.GetHash(ss.FileNamingAlgorithm) return SetScreenshot(ss.Paths, checksum, imageData) } diff --git a/pkg/scene/service.go b/pkg/scene/service.go new file mode 100644 index 000000000..8d2e5dc0c --- /dev/null +++ b/pkg/scene/service.go @@ -0,0 +1,24 @@ +package scene + +import ( + "context" + + "github.com/stashapp/stash/pkg/file" + "github.com/stashapp/stash/pkg/models" +) + +type FinderByFile interface { + FindByFileID(ctx context.Context, fileID file.ID) ([]*models.Scene, error) +} + +type Repository interface { + FinderByFile + Destroyer + models.VideoFileLoader +} + +type Service struct { + File file.Store + Repository Repository + MarkerDestroyer MarkerDestroyer +} diff --git a/pkg/scene/update.go b/pkg/scene/update.go index e1155c368..420736020 100644 --- a/pkg/scene/update.go +++ b/pkg/scene/update.go @@ -1,16 +1,24 @@ package scene import ( - "database/sql" + "context" "errors" "fmt" "time" "github.com/stashapp/stash/pkg/models" - "github.com/stashapp/stash/pkg/sliceutil/intslice" "github.com/stashapp/stash/pkg/utils" ) +type Updater interface { + PartialUpdater + UpdateCover(ctx context.Context, sceneID int, cover []byte) error +} + +type PartialUpdater interface { + UpdatePartial(ctx context.Context, id int, updatedScene models.ScenePartial) (*models.Scene, error) +} + var ErrEmptyUpdater = errors.New("no fields have been set") // UpdateSet is used to update a scene and its relationships. @@ -22,12 +30,6 @@ type UpdateSet struct { // in future these could be moved into a separate struct and reused // for a Creator struct - // Not set if nil. Set to []int{} to clear existing - PerformerIDs []int - // Not set if nil. Set to []int{} to clear existing - TagIDs []int - // Not set if nil. Set to []int{} to clear existing - StashIDs []models.StashID // Not set if nil. Set to []byte{} to clear existing CoverImage []byte } @@ -35,54 +37,30 @@ type UpdateSet struct { // IsEmpty returns true if there is nothing to update. func (u *UpdateSet) IsEmpty() bool { withoutID := u.Partial - withoutID.ID = 0 return withoutID == models.ScenePartial{} && - u.PerformerIDs == nil && - u.TagIDs == nil && - u.StashIDs == nil && u.CoverImage == nil } // Update updates a scene by updating the fields in the Partial field, then // updates non-nil relationships. Returns an error if there is no work to // be done. 
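// Illustrative sketch (hypothetical usage, not from this changeset): with the
// refactor below, relationship changes ride inside ScenePartial as UpdateIDs
// values instead of separate UpdateSet fields. qb is an assumed Updater
// implementation, ss an assumed ScreenshotSetter, and sceneID/tagID assumed IDs.
//
//	us := UpdateSet{
//		ID: sceneID,
//		Partial: models.ScenePartial{
//			Title: models.NewOptionalString("new title"),
//			TagIDs: &models.UpdateIDs{
//				IDs:  []int{tagID},
//				Mode: models.RelationshipUpdateModeAdd,
//			},
//		},
//	}
//	updated, err := us.Update(ctx, qb, ss)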
-func (u *UpdateSet) Update(qb models.SceneWriter, screenshotSetter ScreenshotSetter) (*models.Scene, error) { +func (u *UpdateSet) Update(ctx context.Context, qb Updater, screenshotSetter ScreenshotSetter) (*models.Scene, error) { if u.IsEmpty() { return nil, ErrEmptyUpdater } partial := u.Partial - partial.ID = u.ID - partial.UpdatedAt = &models.SQLiteTimestamp{ - Timestamp: time.Now(), - } + updatedAt := time.Now() + partial.UpdatedAt = models.NewOptionalTime(updatedAt) - ret, err := qb.Update(partial) + ret, err := qb.UpdatePartial(ctx, u.ID, partial) if err != nil { return nil, fmt.Errorf("error updating scene: %w", err) } - if u.PerformerIDs != nil { - if err := qb.UpdatePerformers(u.ID, u.PerformerIDs); err != nil { - return nil, fmt.Errorf("error updating scene performers: %w", err) - } - } - - if u.TagIDs != nil { - if err := qb.UpdateTags(u.ID, u.TagIDs); err != nil { - return nil, fmt.Errorf("error updating scene tags: %w", err) - } - } - - if u.StashIDs != nil { - if err := qb.UpdateStashIDs(u.ID, u.StashIDs); err != nil { - return nil, fmt.Errorf("error updating scene stash_ids: %w", err) - } - } - if u.CoverImage != nil { - if err := qb.UpdateCover(u.ID, u.CoverImage); err != nil { + if err := qb.UpdateCover(ctx, u.ID, u.CoverImage); err != nil { return nil, fmt.Errorf("error updating scene cover: %w", err) } @@ -97,23 +75,7 @@ func (u *UpdateSet) Update(qb models.SceneWriter, screenshotSetter ScreenshotSet // UpdateInput converts the UpdateSet into SceneUpdateInput for hook firing purposes. func (u UpdateSet) UpdateInput() models.SceneUpdateInput { // ensure the partial ID is set - u.Partial.ID = u.ID - ret := u.Partial.UpdateInput() - - if u.PerformerIDs != nil { - ret.PerformerIds = intslice.IntSliceToStringSlice(u.PerformerIDs) - } - - if u.TagIDs != nil { - ret.TagIds = intslice.IntSliceToStringSlice(u.TagIDs) - } - - if u.StashIDs != nil { - for _, s := range u.StashIDs { - ss := s.StashIDInput() - ret.StashIds = append(ret.StashIds, &ss) - } - } + ret := u.Partial.UpdateInput(u.ID) if u.CoverImage != nil { // convert back to base64 @@ -124,99 +86,32 @@ func (u UpdateSet) UpdateInput() models.SceneUpdateInput { return ret } -func UpdateFormat(qb models.SceneWriter, id int, format string) (*models.Scene, error) { - return qb.Update(models.ScenePartial{ - ID: id, - Format: &sql.NullString{ - String: format, - Valid: true, +func AddPerformer(ctx context.Context, qb PartialUpdater, o *models.Scene, performerID int) error { + _, err := qb.UpdatePartial(ctx, o.ID, models.ScenePartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: []int{performerID}, + Mode: models.RelationshipUpdateModeAdd, }, }) + return err } -func UpdateOSHash(qb models.SceneWriter, id int, oshash string) (*models.Scene, error) { - return qb.Update(models.ScenePartial{ - ID: id, - OSHash: &sql.NullString{ - String: oshash, - Valid: true, +func AddTag(ctx context.Context, qb PartialUpdater, o *models.Scene, tagID int) error { + _, err := qb.UpdatePartial(ctx, o.ID, models.ScenePartial{ + TagIDs: &models.UpdateIDs{ + IDs: []int{tagID}, + Mode: models.RelationshipUpdateModeAdd, }, }) + return err } -func UpdateChecksum(qb models.SceneWriter, id int, checksum string) (*models.Scene, error) { - return qb.Update(models.ScenePartial{ - ID: id, - Checksum: &sql.NullString{ - String: checksum, - Valid: true, +func AddGallery(ctx context.Context, qb PartialUpdater, o *models.Scene, galleryID int) error { + _, err := qb.UpdatePartial(ctx, o.ID, models.ScenePartial{ + GalleryIDs: &models.UpdateIDs{ + IDs: []int{galleryID}, 
Mode: models.RelationshipUpdateModeAdd, }, }) -} - -func UpdateFileModTime(qb models.SceneWriter, id int, modTime models.NullSQLiteTimestamp) (*models.Scene, error) { - return qb.Update(models.ScenePartial{ - ID: id, - FileModTime: &modTime, - }) -} - -func AddPerformer(qb models.SceneReaderWriter, id int, performerID int) (bool, error) { - performerIDs, err := qb.GetPerformerIDs(id) - if err != nil { - return false, err - } - - oldLen := len(performerIDs) - performerIDs = intslice.IntAppendUnique(performerIDs, performerID) - - if len(performerIDs) != oldLen { - if err := qb.UpdatePerformers(id, performerIDs); err != nil { - return false, err - } - - return true, nil - } - - return false, nil -} - -func AddTag(qb models.SceneReaderWriter, id int, tagID int) (bool, error) { - tagIDs, err := qb.GetTagIDs(id) - if err != nil { - return false, err - } - - oldLen := len(tagIDs) - tagIDs = intslice.IntAppendUnique(tagIDs, tagID) - - if len(tagIDs) != oldLen { - if err := qb.UpdateTags(id, tagIDs); err != nil { - return false, err - } - - return true, nil - } - - return false, nil -} - -func AddGallery(qb models.SceneReaderWriter, id int, galleryID int) (bool, error) { - galleryIDs, err := qb.GetGalleryIDs(id) - if err != nil { - return false, err - } - - oldLen := len(galleryIDs) - galleryIDs = intslice.IntAppendUnique(galleryIDs, galleryID) - - if len(galleryIDs) != oldLen { - if err := qb.UpdateGalleries(id, galleryIDs); err != nil { - return false, err - } - - return true, nil - } - - return false, nil + return err } diff --git a/pkg/scene/update_test.go b/pkg/scene/update_test.go index 4619fd137..ffd84f00c 100644 --- a/pkg/scene/update_test.go +++ b/pkg/scene/update_test.go @@ -1,6 +1,7 @@ package scene import ( + "context" "errors" "strconv" "testing" @@ -30,20 +31,11 @@ func TestUpdater_IsEmpty(t *testing.T) { &UpdateSet{}, true, }, - { - "id only", - &UpdateSet{ - Partial: models.ScenePartial{ - ID: 1, - }, - }, - true, - }, { "partial set", &UpdateSet{ Partial: models.ScenePartial{ - Organized: &organized, + Organized: models.NewOptionalBool(organized), }, }, false, @@ -51,21 +43,36 @@ func TestUpdater_IsEmpty(t *testing.T) { { "performer set", &UpdateSet{ - PerformerIDs: ids, + Partial: models.ScenePartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: ids, + Mode: models.RelationshipUpdateModeSet, + }, + }, }, false, }, { "tags set", &UpdateSet{ - TagIDs: ids, + Partial: models.ScenePartial{ + TagIDs: &models.UpdateIDs{ + IDs: ids, + Mode: models.RelationshipUpdateModeSet, + }, + }, }, false, }, { "performer set", &UpdateSet{ - StashIDs: stashIDs, + Partial: models.ScenePartial{ + StashIDs: &models.UpdateStashIDs{ + StashIDs: stashIDs, + Mode: models.RelationshipUpdateModeSet, + }, + }, }, false, }, @@ -104,16 +111,12 @@ func TestUpdater_Update(t *testing.T) { tagID ) + ctx := context.Background() + performerIDs := []int{performerID} tagIDs := []int{tagID} stashID := "stashID" endpoint := "endpoint" - stashIDs := []models.StashID{ - { - StashID: stashID, - Endpoint: endpoint, - }, - } title := "title" cover := []byte("cover") @@ -123,22 +126,13 @@ func TestUpdater_Update(t *testing.T) { updateErr := errors.New("error updating") qb := mocks.SceneReaderWriter{} - qb.On("Update", mock.MatchedBy(func(s models.ScenePartial) bool { - return s.ID != badUpdateID - })).Return(validScene, nil) - qb.On("Update", mock.MatchedBy(func(s models.ScenePartial) bool { - return s.ID == badUpdateID - })).Return(nil, updateErr) + qb.On("UpdatePartial", ctx, mock.MatchedBy(func(id int) bool { + return id != 
badUpdateID + }), mock.Anything).Return(validScene, nil) + qb.On("UpdatePartial", ctx, badUpdateID, mock.Anything).Return(nil, updateErr) - qb.On("UpdatePerformers", sceneID, performerIDs).Return(nil).Once() - qb.On("UpdateTags", sceneID, tagIDs).Return(nil).Once() - qb.On("UpdateStashIDs", sceneID, stashIDs).Return(nil).Once() - qb.On("UpdateCover", sceneID, cover).Return(nil).Once() - - qb.On("UpdatePerformers", badPerformersID, performerIDs).Return(updateErr).Once() - qb.On("UpdateTags", badTagsID, tagIDs).Return(updateErr).Once() - qb.On("UpdateStashIDs", badStashIDsID, stashIDs).Return(updateErr).Once() - qb.On("UpdateCover", badCoverID, cover).Return(updateErr).Once() + qb.On("UpdateCover", ctx, sceneID, cover).Return(nil).Once() + qb.On("UpdateCover", ctx, badCoverID, cover).Return(updateErr).Once() tests := []struct { name string @@ -157,13 +151,24 @@ func TestUpdater_Update(t *testing.T) { { "update all", &UpdateSet{ - ID: sceneID, - PerformerIDs: performerIDs, - TagIDs: tagIDs, - StashIDs: []models.StashID{ - { - StashID: stashID, - Endpoint: endpoint, + ID: sceneID, + Partial: models.ScenePartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: performerIDs, + Mode: models.RelationshipUpdateModeSet, + }, + TagIDs: &models.UpdateIDs{ + IDs: tagIDs, + Mode: models.RelationshipUpdateModeSet, + }, + StashIDs: &models.UpdateStashIDs{ + StashIDs: []models.StashID{ + { + StashID: stashID, + Endpoint: endpoint, + }, + }, + Mode: models.RelationshipUpdateModeSet, }, }, CoverImage: cover, @@ -176,7 +181,7 @@ func TestUpdater_Update(t *testing.T) { &UpdateSet{ ID: sceneID, Partial: models.ScenePartial{ - Title: models.NullStringPtr(title), + Title: models.NewOptionalString(title), }, }, false, @@ -187,39 +192,12 @@ func TestUpdater_Update(t *testing.T) { &UpdateSet{ ID: badUpdateID, Partial: models.ScenePartial{ - Title: models.NullStringPtr(title), + Title: models.NewOptionalString(title), }, }, true, true, }, - { - "error updating performers", - &UpdateSet{ - ID: badPerformersID, - PerformerIDs: performerIDs, - }, - true, - true, - }, - { - "error updating tags", - &UpdateSet{ - ID: badTagsID, - TagIDs: tagIDs, - }, - true, - true, - }, - { - "error updating stash IDs", - &UpdateSet{ - ID: badStashIDsID, - StashIDs: stashIDs, - }, - true, - true, - }, { "error updating cover", &UpdateSet{ @@ -232,7 +210,7 @@ func TestUpdater_Update(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := tt.u.Update(&qb, &mockScreenshotSetter{}) + got, err := tt.u.Update(ctx, &qb, &mockScreenshotSetter{}) if (err != nil) != tt.wantErr { t.Errorf("Updater.Update() error = %v, wantErr %v", err, tt.wantErr) return @@ -272,7 +250,7 @@ func TestUpdateSet_UpdateInput(t *testing.T) { Endpoint: endpoint, }, } - stashIDInputs := []*models.StashIDInput{ + stashIDInputs := []models.StashID{ { StashID: stashID, Endpoint: endpoint, @@ -300,11 +278,22 @@ func TestUpdateSet_UpdateInput(t *testing.T) { { "update all", UpdateSet{ - ID: sceneID, - PerformerIDs: performerIDs, - TagIDs: tagIDs, - StashIDs: stashIDs, - CoverImage: cover, + ID: sceneID, + Partial: models.ScenePartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: performerIDs, + Mode: models.RelationshipUpdateModeSet, + }, + TagIDs: &models.UpdateIDs{ + IDs: tagIDs, + Mode: models.RelationshipUpdateModeSet, + }, + StashIDs: &models.UpdateStashIDs{ + StashIDs: stashIDs, + Mode: models.RelationshipUpdateModeSet, + }, + }, + CoverImage: cover, }, models.SceneUpdateInput{ ID: sceneIDStr, @@ -319,7 +308,7 @@ func 
TestUpdateSet_UpdateInput(t *testing.T) { UpdateSet{ ID: sceneID, Partial: models.ScenePartial{ - Title: models.NullStringPtr(title), + Title: models.NewOptionalString(title), }, }, models.SceneUpdateInput{ diff --git a/pkg/scraper/action.go b/pkg/scraper/action.go index 3f80cee29..0011441fb 100644 --- a/pkg/scraper/action.go +++ b/pkg/scraper/action.go @@ -25,24 +25,24 @@ func (e scraperAction) IsValid() bool { } type scraperActionImpl interface { - scrapeByURL(ctx context.Context, url string, ty models.ScrapeContentType) (models.ScrapedContent, error) - scrapeByName(ctx context.Context, name string, ty models.ScrapeContentType) ([]models.ScrapedContent, error) - scrapeByFragment(ctx context.Context, input Input) (models.ScrapedContent, error) + scrapeByURL(ctx context.Context, url string, ty ScrapeContentType) (ScrapedContent, error) + scrapeByName(ctx context.Context, name string, ty ScrapeContentType) ([]ScrapedContent, error) + scrapeByFragment(ctx context.Context, input Input) (ScrapedContent, error) - scrapeSceneByScene(ctx context.Context, scene *models.Scene) (*models.ScrapedScene, error) - scrapeGalleryByGallery(ctx context.Context, gallery *models.Gallery) (*models.ScrapedGallery, error) + scrapeSceneByScene(ctx context.Context, scene *models.Scene) (*ScrapedScene, error) + scrapeGalleryByGallery(ctx context.Context, gallery *models.Gallery) (*ScrapedGallery, error) } -func (c config) getScraper(scraper scraperTypeConfig, client *http.Client, txnManager models.TransactionManager, globalConfig GlobalConfig) scraperActionImpl { +func (c config) getScraper(scraper scraperTypeConfig, client *http.Client, globalConfig GlobalConfig) scraperActionImpl { switch scraper.Action { case scraperActionScript: return newScriptScraper(scraper, c, globalConfig) case scraperActionStash: - return newStashScraper(scraper, client, txnManager, c, globalConfig) + return newStashScraper(scraper, client, c, globalConfig) case scraperActionXPath: - return newXpathScraper(scraper, client, txnManager, c, globalConfig) + return newXpathScraper(scraper, client, c, globalConfig) case scraperActionJson: - return newJsonScraper(scraper, client, txnManager, c, globalConfig) + return newJsonScraper(scraper, client, c, globalConfig) } panic("unknown scraper action: " + scraper.Action) diff --git a/pkg/scraper/autotag.go b/pkg/scraper/autotag.go index 4a86d8df2..cbcd38cfa 100644 --- a/pkg/scraper/autotag.go +++ b/pkg/scraper/autotag.go @@ -8,6 +8,7 @@ import ( "github.com/stashapp/stash/pkg/match" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/txn" ) // autoTagScraperID is the scraper ID for the built-in AutoTag scraper @@ -17,12 +18,17 @@ const ( ) type autotagScraper struct { - txnManager models.TransactionManager + // repository models.Repository + txnManager txn.Manager + performerReader match.PerformerAutoTagQueryer + studioReader match.StudioAutoTagQueryer + tagReader match.TagAutoTagQueryer + globalConfig GlobalConfig } -func autotagMatchPerformers(path string, performerReader models.PerformerReader, trimExt bool) ([]*models.ScrapedPerformer, error) { - p, err := match.PathToPerformers(path, performerReader, nil, trimExt) +func autotagMatchPerformers(ctx context.Context, path string, performerReader match.PerformerAutoTagQueryer, trimExt bool) ([]*models.ScrapedPerformer, error) { + p, err := match.PathToPerformers(ctx, path, performerReader, nil, trimExt) if err != nil { return nil, fmt.Errorf("error matching performers: %w", err) } @@ -45,8 +51,8 @@ func autotagMatchPerformers(path 
string, performerReader models.PerformerReader, return ret, nil } -func autotagMatchStudio(path string, studioReader models.StudioReader, trimExt bool) (*models.ScrapedStudio, error) { - studio, err := match.PathToStudio(path, studioReader, nil, trimExt) +func autotagMatchStudio(ctx context.Context, path string, studioReader match.StudioAutoTagQueryer, trimExt bool) (*models.ScrapedStudio, error) { + studio, err := match.PathToStudio(ctx, path, studioReader, nil, trimExt) if err != nil { return nil, fmt.Errorf("error matching studios: %w", err) } @@ -62,8 +68,8 @@ func autotagMatchStudio(path string, studioReader models.StudioReader, trimExt b return nil, nil } -func autotagMatchTags(path string, tagReader models.TagReader, trimExt bool) ([]*models.ScrapedTag, error) { - t, err := match.PathToTags(path, tagReader, nil, trimExt) +func autotagMatchTags(ctx context.Context, path string, tagReader match.TagAutoTagQueryer, trimExt bool) ([]*models.ScrapedTag, error) { + t, err := match.PathToTags(ctx, path, tagReader, nil, trimExt) if err != nil { return nil, fmt.Errorf("error matching tags: %w", err) } @@ -83,29 +89,29 @@ func autotagMatchTags(path string, tagReader models.TagReader, trimExt bool) ([] return ret, nil } -func (s autotagScraper) viaScene(ctx context.Context, _client *http.Client, scene *models.Scene) (*models.ScrapedScene, error) { - var ret *models.ScrapedScene +func (s autotagScraper) viaScene(ctx context.Context, _client *http.Client, scene *models.Scene) (*ScrapedScene, error) { + var ret *ScrapedScene const trimExt = false // populate performers, studio and tags based on scene path - if err := s.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { + if err := txn.WithTxn(ctx, s.txnManager, func(ctx context.Context) error { path := scene.Path - performers, err := autotagMatchPerformers(path, r.Performer(), trimExt) + performers, err := autotagMatchPerformers(ctx, path, s.performerReader, trimExt) if err != nil { return fmt.Errorf("autotag scraper viaScene: %w", err) } - studio, err := autotagMatchStudio(path, r.Studio(), trimExt) + studio, err := autotagMatchStudio(ctx, path, s.studioReader, trimExt) if err != nil { return fmt.Errorf("autotag scraper viaScene: %w", err) } - tags, err := autotagMatchTags(path, r.Tag(), trimExt) + tags, err := autotagMatchTags(ctx, path, s.tagReader, trimExt) if err != nil { return fmt.Errorf("autotag scraper viaScene: %w", err) } if len(performers) > 0 || studio != nil || len(tags) > 0 { - ret = &models.ScrapedScene{ + ret = &ScrapedScene{ Performers: performers, Studio: studio, Tags: tags, @@ -120,36 +126,37 @@ func (s autotagScraper) viaScene(ctx context.Context, _client *http.Client, scen return ret, nil } -func (s autotagScraper) viaGallery(ctx context.Context, _client *http.Client, gallery *models.Gallery) (*models.ScrapedGallery, error) { - if !gallery.Path.Valid { +func (s autotagScraper) viaGallery(ctx context.Context, _client *http.Client, gallery *models.Gallery) (*ScrapedGallery, error) { + path := gallery.Path + if path == "" { // not valid for non-path-based galleries return nil, nil } // only trim extension if gallery is file-based - trimExt := gallery.Zip + trimExt := gallery.PrimaryFileID != nil - var ret *models.ScrapedGallery + var ret *ScrapedGallery // populate performers, studio and tags based on scene path - if err := s.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - path := gallery.Path.String - performers, err := autotagMatchPerformers(path, r.Performer(), trimExt) + if err := 
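The autotag helpers now take a context and a narrow match.*AutoTagQueryer instead of the full reader repository, and callers wrap them in txn.WithTxn, which carries the transaction in the context rather than in a repository callback. A minimal sketch of the calling pattern; mgr and reader are hypothetical stand-ins for a txn.Manager and a match.PerformerAutoTagQueryer, and the path is illustrative:

```go
var performers []*models.ScrapedPerformer
err := txn.WithTxn(ctx, mgr, func(ctx context.Context) error {
	var innerErr error
	// the reader picks up the transaction implicitly from ctx
	performers, innerErr = autotagMatchPerformers(ctx, "/media/example.mp4", reader, false)
	return innerErr
})
```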
txn.WithTxn(ctx, s.txnManager, func(ctx context.Context) error { + path := gallery.Path + performers, err := autotagMatchPerformers(ctx, path, s.performerReader, trimExt) if err != nil { return fmt.Errorf("autotag scraper viaGallery: %w", err) } - studio, err := autotagMatchStudio(path, r.Studio(), trimExt) + studio, err := autotagMatchStudio(ctx, path, s.studioReader, trimExt) if err != nil { return fmt.Errorf("autotag scraper viaGallery: %w", err) } - tags, err := autotagMatchTags(path, r.Tag(), trimExt) + tags, err := autotagMatchTags(ctx, path, s.tagReader, trimExt) if err != nil { return fmt.Errorf("autotag scraper viaGallery: %w", err) } if len(performers) > 0 || studio != nil || len(tags) > 0 { - ret = &models.ScrapedGallery{ + ret = &ScrapedGallery{ Performers: performers, Studio: studio, Tags: tags, @@ -164,42 +171,45 @@ func (s autotagScraper) viaGallery(ctx context.Context, _client *http.Client, ga return ret, nil } -func (s autotagScraper) supports(ty models.ScrapeContentType) bool { +func (s autotagScraper) supports(ty ScrapeContentType) bool { switch ty { - case models.ScrapeContentTypeScene: + case ScrapeContentTypeScene: return true - case models.ScrapeContentTypeGallery: + case ScrapeContentTypeGallery: return true } return false } -func (s autotagScraper) supportsURL(url string, ty models.ScrapeContentType) bool { +func (s autotagScraper) supportsURL(url string, ty ScrapeContentType) bool { return false } -func (s autotagScraper) spec() models.Scraper { - supportedScrapes := []models.ScrapeType{ - models.ScrapeTypeFragment, +func (s autotagScraper) spec() Scraper { + supportedScrapes := []ScrapeType{ + ScrapeTypeFragment, } - return models.Scraper{ + return Scraper{ ID: autoTagScraperID, Name: autoTagScraperName, - Scene: &models.ScraperSpec{ + Scene: &ScraperSpec{ SupportedScrapes: supportedScrapes, }, - Gallery: &models.ScraperSpec{ + Gallery: &ScraperSpec{ SupportedScrapes: supportedScrapes, }, } } -func getAutoTagScraper(txnManager models.TransactionManager, globalConfig GlobalConfig) scraper { +func getAutoTagScraper(txnManager txn.Manager, repo Repository, globalConfig GlobalConfig) scraper { base := autotagScraper{ - txnManager: txnManager, - globalConfig: globalConfig, + txnManager: txnManager, + performerReader: repo.PerformerFinder, + studioReader: repo.StudioFinder, + tagReader: repo.TagFinder, + globalConfig: globalConfig, } return base diff --git a/pkg/scraper/cache.go b/pkg/scraper/cache.go index 2190dcb03..64cd63629 100644 --- a/pkg/scraper/cache.go +++ b/pkg/scraper/cache.go @@ -13,7 +13,11 @@ import ( "github.com/stashapp/stash/pkg/fsutil" "github.com/stashapp/stash/pkg/logger" + "github.com/stashapp/stash/pkg/match" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/scene" + "github.com/stashapp/stash/pkg/tag" + "github.com/stashapp/stash/pkg/txn" ) const ( @@ -47,12 +51,43 @@ func isCDPPathWS(c GlobalConfig) bool { return strings.HasPrefix(c.GetScraperCDPPath(), "ws://") } +type PerformerFinder interface { + match.PerformerAutoTagQueryer + match.PerformerFinder +} + +type StudioFinder interface { + match.StudioAutoTagQueryer + match.StudioFinder +} + +type TagFinder interface { + match.TagAutoTagQueryer + tag.Queryer +} + +type GalleryFinder interface { + Find(ctx context.Context, id int) (*models.Gallery, error) + models.FileLoader +} + +type Repository struct { + SceneFinder scene.IDFinder + GalleryFinder GalleryFinder + TagFinder TagFinder + PerformerFinder PerformerFinder + MovieFinder match.MovieNamesFinder + StudioFinder 
StudioFinder +} + // Cache stores the database of scrapers type Cache struct { client *http.Client scrapers map[string]scraper // Scraper ID -> Scraper globalConfig GlobalConfig - txnManager models.TransactionManager + txnManager txn.Manager + + repository Repository } // newClient creates a scraper-local http client we use throughout the scraper subsystem. @@ -81,30 +116,33 @@ func newClient(gc GlobalConfig) *http.Client { // // Scraper configurations are loaded from yml files in the provided scrapers // directory and any subdirectories. -func NewCache(globalConfig GlobalConfig, txnManager models.TransactionManager) (*Cache, error) { +func NewCache(globalConfig GlobalConfig, txnManager txn.Manager, repo Repository) (*Cache, error) { // HTTP Client setup client := newClient(globalConfig) - scrapers, err := loadScrapers(globalConfig, txnManager) + ret := &Cache{ + client: client, + globalConfig: globalConfig, + txnManager: txnManager, + repository: repo, + } + + var err error + ret.scrapers, err = ret.loadScrapers() if err != nil { return nil, err } - return &Cache{ - client: client, - globalConfig: globalConfig, - scrapers: scrapers, - txnManager: txnManager, - }, nil + return ret, nil } -func loadScrapers(globalConfig GlobalConfig, txnManager models.TransactionManager) (map[string]scraper, error) { - path := globalConfig.GetScrapersPath() +func (c *Cache) loadScrapers() (map[string]scraper, error) { + path := c.globalConfig.GetScrapersPath() scrapers := make(map[string]scraper) // Add built-in scrapers - freeOnes := getFreeonesScraper(txnManager, globalConfig) - autoTag := getAutoTagScraper(txnManager, globalConfig) + freeOnes := getFreeonesScraper(c.globalConfig) + autoTag := getAutoTagScraper(c.txnManager, c.repository, c.globalConfig) scrapers[freeOnes.spec().ID] = freeOnes scrapers[autoTag.spec().ID] = autoTag @@ -113,11 +151,11 @@ func loadScrapers(globalConfig GlobalConfig, txnManager models.TransactionManage scraperFiles := []string{} err := fsutil.SymWalk(path, func(fp string, f os.FileInfo, err error) error { if filepath.Ext(fp) == ".yml" { - c, err := loadConfigFromYAMLFile(fp) + conf, err := loadConfigFromYAMLFile(fp) if err != nil { logger.Errorf("Error loading scraper %s: %v", fp, err) } else { - scraper := newGroupScraper(*c, txnManager, globalConfig) + scraper := newGroupScraper(*conf, c.globalConfig) scrapers[scraper.spec().ID] = scraper } scraperFiles = append(scraperFiles, fp) @@ -137,7 +175,7 @@ func loadScrapers(globalConfig GlobalConfig, txnManager models.TransactionManage // In the event of an error during loading, the cache will be left empty. func (c *Cache) ReloadScrapers() error { c.scrapers = nil - scrapers, err := loadScrapers(c.globalConfig, c.txnManager) + scrapers, err := c.loadScrapers() if err != nil { return err } @@ -148,8 +186,8 @@ func (c *Cache) ReloadScrapers() error { // ListScrapers lists scrapers matching one of the given types. // Returns a list of scrapers, sorted by their ID. -func (c Cache) ListScrapers(tys []models.ScrapeContentType) []*models.Scraper { - var ret []*models.Scraper +func (c Cache) ListScrapers(tys []ScrapeContentType) []*Scraper { + var ret []*Scraper for _, s := range c.scrapers { for _, t := range tys { if s.supports(t) { @@ -168,7 +206,7 @@ func (c Cache) ListScrapers(tys []models.ScrapeContentType) []*models.Scraper { } // GetScraper returns the scraper matching the provided id. 
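With loadScrapers now a method on Cache, constructing a Cache means wiring up the Repository of narrow finders first. A hypothetical wiring sketch — the db.* stores are assumptions standing in for whatever concrete stores satisfy the finder interfaces:

```go
repo := scraper.Repository{
	SceneFinder:     db.Scene,
	GalleryFinder:   db.Gallery,
	TagFinder:       db.Tag,
	PerformerFinder: db.Performer,
	MovieFinder:     db.Movie,
	StudioFinder:    db.Studio,
}

cache, err := scraper.NewCache(globalConfig, txnManager, repo)
if err != nil {
	// one or more scraper configurations failed to load
	return err
}
```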
-func (c Cache) GetScraper(scraperID string) *models.Scraper { +func (c Cache) GetScraper(scraperID string) *Scraper { s := c.findScraper(scraperID) if s != nil { spec := s.spec() @@ -187,7 +225,7 @@ func (c Cache) findScraper(scraperID string) scraper { return nil } -func (c Cache) ScrapeName(ctx context.Context, id, query string, ty models.ScrapeContentType) ([]models.ScrapedContent, error) { +func (c Cache) ScrapeName(ctx context.Context, id, query string, ty ScrapeContentType) ([]ScrapedContent, error) { // find scraper with the provided id s := c.findScraper(id) if s == nil { @@ -206,7 +244,7 @@ func (c Cache) ScrapeName(ctx context.Context, id, query string, ty models.Scrap } // ScrapeFragment uses the given fragment input to scrape -func (c Cache) ScrapeFragment(ctx context.Context, id string, input Input) (models.ScrapedContent, error) { +func (c Cache) ScrapeFragment(ctx context.Context, id string, input Input) (ScrapedContent, error) { s := c.findScraper(id) if s == nil { return nil, fmt.Errorf("%w: id %s", ErrNotFound, id) @@ -228,7 +266,7 @@ func (c Cache) ScrapeFragment(ctx context.Context, id string, input Input) (mode // ScrapeURL scrapes a given url for the given content. Searches the scraper cache // and picks the first scraper capable of scraping the given url into the desired // content. Returns the scraped content or an error if the scrape fails. -func (c Cache) ScrapeURL(ctx context.Context, url string, ty models.ScrapeContentType) (models.ScrapedContent, error) { +func (c Cache) ScrapeURL(ctx context.Context, url string, ty ScrapeContentType) (ScrapedContent, error) { for _, s := range c.scrapers { if s.supportsURL(url, ty) { ul, ok := s.(urlScraper) @@ -251,7 +289,7 @@ func (c Cache) ScrapeURL(ctx context.Context, url string, ty models.ScrapeConten return nil, nil } -func (c Cache) ScrapeID(ctx context.Context, scraperID string, id int, ty models.ScrapeContentType) (models.ScrapedContent, error) { +func (c Cache) ScrapeID(ctx context.Context, scraperID string, id int, ty ScrapeContentType) (ScrapedContent, error) { s := c.findScraper(scraperID) if s == nil { return nil, fmt.Errorf("%w: id %s", ErrNotFound, scraperID) @@ -261,15 +299,15 @@ func (c Cache) ScrapeID(ctx context.Context, scraperID string, id int, ty models return nil, fmt.Errorf("%w: cannot use scraper %s to scrape %v content", ErrNotSupported, scraperID, ty) } - var ret models.ScrapedContent + var ret ScrapedContent switch ty { - case models.ScrapeContentTypeScene: + case ScrapeContentTypeScene: ss, ok := s.(sceneScraper) if !ok { return nil, fmt.Errorf("%w: cannot use scraper %s as a scene scraper", ErrNotSupported, scraperID) } - scene, err := getScene(ctx, id, c.txnManager) + scene, err := c.getScene(ctx, id) if err != nil { return nil, fmt.Errorf("scraper %s: unable to load scene id %v: %w", scraperID, id, err) } @@ -284,13 +322,13 @@ func (c Cache) ScrapeID(ctx context.Context, scraperID string, id int, ty models if scraped != nil { ret = scraped } - case models.ScrapeContentTypeGallery: + case ScrapeContentTypeGallery: gs, ok := s.(galleryScraper) if !ok { return nil, fmt.Errorf("%w: cannot use scraper %s as a gallery scraper", ErrNotSupported, scraperID) } - gallery, err := getGallery(ctx, id, c.txnManager) + gallery, err := c.getGallery(ctx, id) if err != nil { return nil, fmt.Errorf("scraper %s: unable to load gallery id %v: %w", scraperID, id, err) } @@ -309,3 +347,32 @@ func (c Cache) ScrapeID(ctx context.Context, scraperID string, id int, ty models return c.postScrape(ctx, ret) } + +func (c 
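ScrapeID returns the post-processed result as the ScrapedContent union, so callers type-switch it back to a concrete type. A usage sketch (scraper and scene IDs hypothetical); note that post-processing may hand back either the value or the pointer form:

```go
content, err := cache.ScrapeID(ctx, "example-scraper", 42, scraper.ScrapeContentTypeScene)
if err != nil {
	return err
}

switch v := content.(type) {
case scraper.ScrapedScene:
	if v.Title != nil {
		log.Printf("scraped title: %s", *v.Title)
	}
case *scraper.ScrapedScene:
	if v != nil && v.Title != nil {
		log.Printf("scraped title: %s", *v.Title)
	}
}
```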
Cache) getScene(ctx context.Context, sceneID int) (*models.Scene, error) { + var ret *models.Scene + if err := txn.WithTxn(ctx, c.txnManager, func(ctx context.Context) error { + var err error + ret, err = c.repository.SceneFinder.Find(ctx, sceneID) + return err + }); err != nil { + return nil, err + } + return ret, nil +} + +func (c Cache) getGallery(ctx context.Context, galleryID int) (*models.Gallery, error) { + var ret *models.Gallery + if err := txn.WithTxn(ctx, c.txnManager, func(ctx context.Context) error { + var err error + ret, err = c.repository.GalleryFinder.Find(ctx, galleryID) + + if ret != nil { + err = ret.LoadFiles(ctx, c.repository.GalleryFinder) + } + + return err + }); err != nil { + return nil, err + } + return ret, nil +} diff --git a/pkg/scraper/config.go b/pkg/scraper/config.go index 4782fb47b..3a0aadf51 100644 --- a/pkg/scraper/config.go +++ b/pkg/scraper/config.go @@ -8,7 +8,6 @@ import ( "path/filepath" "strings" - "github.com/stashapp/stash/pkg/models" "gopkg.in/yaml.v2" ) @@ -233,21 +232,21 @@ func loadConfigFromYAMLFile(path string) (*config, error) { return ret, nil } -func (c config) spec() models.Scraper { - ret := models.Scraper{ +func (c config) spec() Scraper { + ret := Scraper{ ID: c.ID, Name: c.Name, } - performer := models.ScraperSpec{} + performer := ScraperSpec{} if c.PerformerByName != nil { - performer.SupportedScrapes = append(performer.SupportedScrapes, models.ScrapeTypeName) + performer.SupportedScrapes = append(performer.SupportedScrapes, ScrapeTypeName) } if c.PerformerByFragment != nil { - performer.SupportedScrapes = append(performer.SupportedScrapes, models.ScrapeTypeFragment) + performer.SupportedScrapes = append(performer.SupportedScrapes, ScrapeTypeFragment) } if len(c.PerformerByURL) > 0 { - performer.SupportedScrapes = append(performer.SupportedScrapes, models.ScrapeTypeURL) + performer.SupportedScrapes = append(performer.SupportedScrapes, ScrapeTypeURL) for _, v := range c.PerformerByURL { performer.Urls = append(performer.Urls, v.URL...) } @@ -257,15 +256,15 @@ func (c config) spec() models.Scraper { ret.Performer = &performer } - scene := models.ScraperSpec{} + scene := ScraperSpec{} if c.SceneByFragment != nil { - scene.SupportedScrapes = append(scene.SupportedScrapes, models.ScrapeTypeFragment) + scene.SupportedScrapes = append(scene.SupportedScrapes, ScrapeTypeFragment) } if c.SceneByName != nil && c.SceneByQueryFragment != nil { - scene.SupportedScrapes = append(scene.SupportedScrapes, models.ScrapeTypeName) + scene.SupportedScrapes = append(scene.SupportedScrapes, ScrapeTypeName) } if len(c.SceneByURL) > 0 { - scene.SupportedScrapes = append(scene.SupportedScrapes, models.ScrapeTypeURL) + scene.SupportedScrapes = append(scene.SupportedScrapes, ScrapeTypeURL) for _, v := range c.SceneByURL { scene.Urls = append(scene.Urls, v.URL...) } @@ -275,12 +274,12 @@ func (c config) spec() models.Scraper { ret.Scene = &scene } - gallery := models.ScraperSpec{} + gallery := ScraperSpec{} if c.GalleryByFragment != nil { - gallery.SupportedScrapes = append(gallery.SupportedScrapes, models.ScrapeTypeFragment) + gallery.SupportedScrapes = append(gallery.SupportedScrapes, ScrapeTypeFragment) } if len(c.GalleryByURL) > 0 { - gallery.SupportedScrapes = append(gallery.SupportedScrapes, models.ScrapeTypeURL) + gallery.SupportedScrapes = append(gallery.SupportedScrapes, ScrapeTypeURL) for _, v := range c.GalleryByURL { gallery.Urls = append(gallery.Urls, v.URL...) 
} @@ -290,9 +289,9 @@ func (c config) spec() models.Scraper { ret.Gallery = &gallery } - movie := models.ScraperSpec{} + movie := ScraperSpec{} if len(c.MovieByURL) > 0 { - movie.SupportedScrapes = append(movie.SupportedScrapes, models.ScrapeTypeURL) + movie.SupportedScrapes = append(movie.SupportedScrapes, ScrapeTypeURL) for _, v := range c.MovieByURL { movie.Urls = append(movie.Urls, v.URL...) } @@ -305,42 +304,42 @@ func (c config) spec() models.Scraper { return ret } -func (c config) supports(ty models.ScrapeContentType) bool { +func (c config) supports(ty ScrapeContentType) bool { switch ty { - case models.ScrapeContentTypePerformer: + case ScrapeContentTypePerformer: return c.PerformerByName != nil || c.PerformerByFragment != nil || len(c.PerformerByURL) > 0 - case models.ScrapeContentTypeScene: + case ScrapeContentTypeScene: return (c.SceneByName != nil && c.SceneByQueryFragment != nil) || c.SceneByFragment != nil || len(c.SceneByURL) > 0 - case models.ScrapeContentTypeGallery: + case ScrapeContentTypeGallery: return c.GalleryByFragment != nil || len(c.GalleryByURL) > 0 - case models.ScrapeContentTypeMovie: + case ScrapeContentTypeMovie: return len(c.MovieByURL) > 0 } panic("Unhandled ScrapeContentType") } -func (c config) matchesURL(url string, ty models.ScrapeContentType) bool { +func (c config) matchesURL(url string, ty ScrapeContentType) bool { switch ty { - case models.ScrapeContentTypePerformer: + case ScrapeContentTypePerformer: for _, scraper := range c.PerformerByURL { if scraper.matchesURL(url) { return true } } - case models.ScrapeContentTypeScene: + case ScrapeContentTypeScene: for _, scraper := range c.SceneByURL { if scraper.matchesURL(url) { return true } } - case models.ScrapeContentTypeGallery: + case ScrapeContentTypeGallery: for _, scraper := range c.GalleryByURL { if scraper.matchesURL(url) { return true } } - case models.ScrapeContentTypeMovie: + case ScrapeContentTypeMovie: for _, scraper := range c.MovieByURL { if scraper.matchesURL(url) { return true diff --git a/pkg/scraper/freeones.go b/pkg/scraper/freeones.go index 7b6c81649..9a8eb4859 100644 --- a/pkg/scraper/freeones.go +++ b/pkg/scraper/freeones.go @@ -4,7 +4,6 @@ import ( "strings" "github.com/stashapp/stash/pkg/logger" - "github.com/stashapp/stash/pkg/models" ) // FreeonesScraperID is the scraper ID for the built-in Freeones scraper @@ -123,7 +122,7 @@ xPathScrapers: # Last updated April 13, 2021 ` -func getFreeonesScraper(txnManager models.TransactionManager, globalConfig GlobalConfig) scraper { +func getFreeonesScraper(globalConfig GlobalConfig) scraper { yml := freeonesScraperConfig c, err := loadConfigFromYAML(FreeonesScraperID, strings.NewReader(yml)) @@ -131,5 +130,5 @@ func getFreeonesScraper(txnManager models.TransactionManager, globalConfig Globa logger.Fatalf("Error loading builtin freeones scraper: %s", err.Error()) } - return newGroupScraper(*c, txnManager, globalConfig) + return newGroupScraper(*c, globalConfig) } diff --git a/pkg/scraper/gallery.go b/pkg/scraper/gallery.go new file mode 100644 index 000000000..db2c98755 --- /dev/null +++ b/pkg/scraper/gallery.go @@ -0,0 +1,22 @@ +package scraper + +import "github.com/stashapp/stash/pkg/models" + +type ScrapedGallery struct { + Title *string `json:"title"` + Details *string `json:"details"` + URL *string `json:"url"` + Date *string `json:"date"` + Studio *models.ScrapedStudio `json:"studio"` + Tags []*models.ScrapedTag `json:"tags"` + Performers []*models.ScrapedPerformer `json:"performers"` +} + +func (ScrapedGallery) IsScrapedContent() 
{} + +type ScrapedGalleryInput struct { + Title *string `json:"title"` + Details *string `json:"details"` + URL *string `json:"url"` + Date *string `json:"date"` +} diff --git a/pkg/scraper/group.go b/pkg/scraper/group.go index 7a3620118..bbf0a680a 100644 --- a/pkg/scraper/group.go +++ b/pkg/scraper/group.go @@ -11,19 +11,17 @@ import ( type group struct { config config - txnManager models.TransactionManager globalConf GlobalConfig } -func newGroupScraper(c config, txnManager models.TransactionManager, globalConfig GlobalConfig) scraper { +func newGroupScraper(c config, globalConfig GlobalConfig) scraper { return group{ config: c, - txnManager: txnManager, globalConf: globalConfig, } } -func (g group) spec() models.Scraper { +func (g group) spec() Scraper { return g.config.spec() } @@ -42,61 +40,61 @@ func (g group) fragmentScraper(input Input) *scraperTypeConfig { return nil } -func (g group) viaFragment(ctx context.Context, client *http.Client, input Input) (models.ScrapedContent, error) { +func (g group) viaFragment(ctx context.Context, client *http.Client, input Input) (ScrapedContent, error) { stc := g.fragmentScraper(input) if stc == nil { // If there's no performer fragment scraper in the group, we try to use // the URL scraper. Check if there's an URL in the input, and then shift // to an URL scrape if it's present. if input.Performer != nil && input.Performer.URL != nil && *input.Performer.URL != "" { - return g.viaURL(ctx, client, *input.Performer.URL, models.ScrapeContentTypePerformer) + return g.viaURL(ctx, client, *input.Performer.URL, ScrapeContentTypePerformer) } return nil, ErrNotSupported } - s := g.config.getScraper(*stc, client, g.txnManager, g.globalConf) + s := g.config.getScraper(*stc, client, g.globalConf) return s.scrapeByFragment(ctx, input) } -func (g group) viaScene(ctx context.Context, client *http.Client, scene *models.Scene) (*models.ScrapedScene, error) { +func (g group) viaScene(ctx context.Context, client *http.Client, scene *models.Scene) (*ScrapedScene, error) { if g.config.SceneByFragment == nil { return nil, ErrNotSupported } - s := g.config.getScraper(*g.config.SceneByFragment, client, g.txnManager, g.globalConf) + s := g.config.getScraper(*g.config.SceneByFragment, client, g.globalConf) return s.scrapeSceneByScene(ctx, scene) } -func (g group) viaGallery(ctx context.Context, client *http.Client, gallery *models.Gallery) (*models.ScrapedGallery, error) { +func (g group) viaGallery(ctx context.Context, client *http.Client, gallery *models.Gallery) (*ScrapedGallery, error) { if g.config.GalleryByFragment == nil { return nil, ErrNotSupported } - s := g.config.getScraper(*g.config.GalleryByFragment, client, g.txnManager, g.globalConf) + s := g.config.getScraper(*g.config.GalleryByFragment, client, g.globalConf) return s.scrapeGalleryByGallery(ctx, gallery) } -func loadUrlCandidates(c config, ty models.ScrapeContentType) []*scrapeByURLConfig { +func loadUrlCandidates(c config, ty ScrapeContentType) []*scrapeByURLConfig { switch ty { - case models.ScrapeContentTypePerformer: + case ScrapeContentTypePerformer: return c.PerformerByURL - case models.ScrapeContentTypeScene: + case ScrapeContentTypeScene: return c.SceneByURL - case models.ScrapeContentTypeMovie: + case ScrapeContentTypeMovie: return c.MovieByURL - case models.ScrapeContentTypeGallery: + case ScrapeContentTypeGallery: return c.GalleryByURL } panic("loadUrlCandidates: unreachable") } -func (g group) viaURL(ctx context.Context, client *http.Client, url string, ty models.ScrapeContentType) 
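One subtlety in viaFragment above: when the group has no fragment scraper configured, it falls back to a URL scrape if the performer input carries a URL. A sketch of an Input that takes the fallback path (values hypothetical, written from inside the scraper package):

```go
u := "https://example.org/performer/jane"
input := Input{
	Performer: &ScrapedPerformerInput{URL: &u},
}

// with no PerformerByFragment configured, this is routed through viaURL
content, err := g.viaFragment(ctx, client, input)
```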
(models.ScrapedContent, error) { +func (g group) viaURL(ctx context.Context, client *http.Client, url string, ty ScrapeContentType) (ScrapedContent, error) { candidates := loadUrlCandidates(g.config, ty) for _, scraper := range candidates { if scraper.matchesURL(url) { - s := g.config.getScraper(scraper.scraperTypeConfig, client, g.txnManager, g.globalConf) + s := g.config.getScraper(scraper.scraperTypeConfig, client, g.globalConf) ret, err := s.scrapeByURL(ctx, url, ty) if err != nil { return nil, err @@ -111,31 +109,31 @@ func (g group) viaURL(ctx context.Context, client *http.Client, url string, ty m return nil, nil } -func (g group) viaName(ctx context.Context, client *http.Client, name string, ty models.ScrapeContentType) ([]models.ScrapedContent, error) { +func (g group) viaName(ctx context.Context, client *http.Client, name string, ty ScrapeContentType) ([]ScrapedContent, error) { switch ty { - case models.ScrapeContentTypePerformer: + case ScrapeContentTypePerformer: if g.config.PerformerByName == nil { break } - s := g.config.getScraper(*g.config.PerformerByName, client, g.txnManager, g.globalConf) + s := g.config.getScraper(*g.config.PerformerByName, client, g.globalConf) return s.scrapeByName(ctx, name, ty) - case models.ScrapeContentTypeScene: + case ScrapeContentTypeScene: if g.config.SceneByName == nil { break } - s := g.config.getScraper(*g.config.SceneByName, client, g.txnManager, g.globalConf) + s := g.config.getScraper(*g.config.SceneByName, client, g.globalConf) return s.scrapeByName(ctx, name, ty) } return nil, fmt.Errorf("%w: cannot load %v by name", ErrNotSupported, ty) } -func (g group) supports(ty models.ScrapeContentType) bool { +func (g group) supports(ty ScrapeContentType) bool { return g.config.supports(ty) } -func (g group) supportsURL(url string, ty models.ScrapeContentType) bool { +func (g group) supportsURL(url string, ty ScrapeContentType) bool { return g.config.matchesURL(url, ty) } diff --git a/pkg/scraper/image.go b/pkg/scraper/image.go index 9b48f9f4f..5757bc9b3 100644 --- a/pkg/scraper/image.go +++ b/pkg/scraper/image.go @@ -29,7 +29,7 @@ func setPerformerImage(ctx context.Context, client *http.Client, p *models.Scrap return nil } -func setSceneImage(ctx context.Context, client *http.Client, s *models.ScrapedScene, globalConfig GlobalConfig) error { +func setSceneImage(ctx context.Context, client *http.Client, s *ScrapedScene, globalConfig GlobalConfig) error { // don't try to get the image if it doesn't appear to be a URL if s.Image == nil || !strings.HasPrefix(*s.Image, "http") { // nothing to do diff --git a/pkg/scraper/json.go b/pkg/scraper/json.go index cca0556d0..1d6358a92 100644 --- a/pkg/scraper/json.go +++ b/pkg/scraper/json.go @@ -19,16 +19,14 @@ type jsonScraper struct { config config globalConfig GlobalConfig client *http.Client - txnManager models.TransactionManager } -func newJsonScraper(scraper scraperTypeConfig, client *http.Client, txnManager models.TransactionManager, config config, globalConfig GlobalConfig) *jsonScraper { +func newJsonScraper(scraper scraperTypeConfig, client *http.Client, config config, globalConfig GlobalConfig) *jsonScraper { return &jsonScraper{ scraper: scraper, config: config, client: client, globalConfig: globalConfig, - txnManager: txnManager, } } @@ -75,7 +73,7 @@ func (s *jsonScraper) loadURL(ctx context.Context, url string) (string, error) { return docStr, err } -func (s *jsonScraper) scrapeByURL(ctx context.Context, url string, ty models.ScrapeContentType) (models.ScrapedContent, error) { +func (s 
*jsonScraper) scrapeByURL(ctx context.Context, url string, ty ScrapeContentType) (ScrapedContent, error) { u := replaceURL(url, s.scraper) // allow a URL Replace for url-queries doc, scraper, err := s.scrapeURL(ctx, u) if err != nil { @@ -84,20 +82,20 @@ func (s *jsonScraper) scrapeByURL(ctx context.Context, url string, ty models.Scr q := s.getJsonQuery(doc) switch ty { - case models.ScrapeContentTypePerformer: + case ScrapeContentTypePerformer: return scraper.scrapePerformer(ctx, q) - case models.ScrapeContentTypeScene: + case ScrapeContentTypeScene: return scraper.scrapeScene(ctx, q) - case models.ScrapeContentTypeGallery: + case ScrapeContentTypeGallery: return scraper.scrapeGallery(ctx, q) - case models.ScrapeContentTypeMovie: + case ScrapeContentTypeMovie: return scraper.scrapeMovie(ctx, q) } return nil, ErrNotSupported } -func (s *jsonScraper) scrapeByName(ctx context.Context, name string, ty models.ScrapeContentType) ([]models.ScrapedContent, error) { +func (s *jsonScraper) scrapeByName(ctx context.Context, name string, ty ScrapeContentType) ([]ScrapedContent, error) { scraper := s.getJsonScraper() if scraper == nil { @@ -121,9 +119,9 @@ func (s *jsonScraper) scrapeByName(ctx context.Context, name string, ty models.S q := s.getJsonQuery(doc) q.setType(SearchQuery) - var content []models.ScrapedContent + var content []ScrapedContent switch ty { - case models.ScrapeContentTypePerformer: + case ScrapeContentTypePerformer: performers, err := scraper.scrapePerformers(ctx, q) if err != nil { return nil, err @@ -134,7 +132,7 @@ func (s *jsonScraper) scrapeByName(ctx context.Context, name string, ty models.S } return content, nil - case models.ScrapeContentTypeScene: + case ScrapeContentTypeScene: scenes, err := scraper.scrapeScenes(ctx, q) if err != nil { return nil, err @@ -150,7 +148,7 @@ func (s *jsonScraper) scrapeByName(ctx context.Context, name string, ty models.S return nil, ErrNotSupported } -func (s *jsonScraper) scrapeSceneByScene(ctx context.Context, scene *models.Scene) (*models.ScrapedScene, error) { +func (s *jsonScraper) scrapeSceneByScene(ctx context.Context, scene *models.Scene) (*ScrapedScene, error) { // construct the URL queryURL := queryURLParametersFromScene(scene) if s.scraper.QueryURLReplacements != nil { @@ -174,7 +172,7 @@ func (s *jsonScraper) scrapeSceneByScene(ctx context.Context, scene *models.Scen return scraper.scrapeScene(ctx, q) } -func (s *jsonScraper) scrapeByFragment(ctx context.Context, input Input) (models.ScrapedContent, error) { +func (s *jsonScraper) scrapeByFragment(ctx context.Context, input Input) (ScrapedContent, error) { switch { case input.Gallery != nil: return nil, fmt.Errorf("%w: cannot use a json scraper as a gallery fragment scraper", ErrNotSupported) @@ -209,7 +207,7 @@ func (s *jsonScraper) scrapeByFragment(ctx context.Context, input Input) (models return scraper.scrapeScene(ctx, q) } -func (s *jsonScraper) scrapeGalleryByGallery(ctx context.Context, gallery *models.Gallery) (*models.ScrapedGallery, error) { +func (s *jsonScraper) scrapeGalleryByGallery(ctx context.Context, gallery *models.Gallery) (*ScrapedGallery, error) { // construct the URL queryURL := queryURLParametersFromGallery(gallery) if s.scraper.QueryURLReplacements != nil { diff --git a/pkg/scraper/mapped.go b/pkg/scraper/mapped.go index 4753bb182..e10d1ed65 100644 --- a/pkg/scraper/mapped.go +++ b/pkg/scraper/mapped.go @@ -809,8 +809,8 @@ func (s mappedScraper) scrapePerformers(ctx context.Context, q mappedQuery) ([]* return ret, nil } -func (s mappedScraper) 
processScene(ctx context.Context, q mappedQuery, r mappedResult) *models.ScrapedScene { - var ret models.ScrapedScene +func (s mappedScraper) processScene(ctx context.Context, q mappedQuery, r mappedResult) *ScrapedScene { + var ret ScrapedScene sceneScraperConfig := s.Scene @@ -884,8 +884,8 @@ func (s mappedScraper) processScene(ctx context.Context, q mappedQuery, r mapped return &ret } -func (s mappedScraper) scrapeScenes(ctx context.Context, q mappedQuery) ([]*models.ScrapedScene, error) { - var ret []*models.ScrapedScene +func (s mappedScraper) scrapeScenes(ctx context.Context, q mappedQuery) ([]*ScrapedScene, error) { + var ret []*ScrapedScene sceneScraperConfig := s.Scene sceneMap := sceneScraperConfig.mappedConfig @@ -903,8 +903,8 @@ func (s mappedScraper) scrapeScenes(ctx context.Context, q mappedQuery) ([]*mode return ret, nil } -func (s mappedScraper) scrapeScene(ctx context.Context, q mappedQuery) (*models.ScrapedScene, error) { - var ret *models.ScrapedScene +func (s mappedScraper) scrapeScene(ctx context.Context, q mappedQuery) (*ScrapedScene, error) { + var ret *ScrapedScene sceneScraperConfig := s.Scene sceneMap := sceneScraperConfig.mappedConfig @@ -921,8 +921,8 @@ func (s mappedScraper) scrapeScene(ctx context.Context, q mappedQuery) (*models. return ret, nil } -func (s mappedScraper) scrapeGallery(ctx context.Context, q mappedQuery) (*models.ScrapedGallery, error) { - var ret *models.ScrapedGallery +func (s mappedScraper) scrapeGallery(ctx context.Context, q mappedQuery) (*ScrapedGallery, error) { + var ret *ScrapedGallery galleryScraperConfig := s.Gallery galleryMap := galleryScraperConfig.mappedConfig @@ -937,7 +937,7 @@ func (s mappedScraper) scrapeGallery(ctx context.Context, q mappedQuery) (*model logger.Debug(`Processing gallery:`) results := galleryMap.process(ctx, q, s.Common) if len(results) > 0 { - ret = &models.ScrapedGallery{} + ret = &ScrapedGallery{} results[0].apply(ret) diff --git a/pkg/scraper/movie.go b/pkg/scraper/movie.go new file mode 100644 index 000000000..4416b6199 --- /dev/null +++ b/pkg/scraper/movie.go @@ -0,0 +1,12 @@ +package scraper + +type ScrapedMovieInput struct { + Name *string `json:"name"` + Aliases *string `json:"aliases"` + Duration *string `json:"duration"` + Date *string `json:"date"` + Rating *string `json:"rating"` + Director *string `json:"director"` + URL *string `json:"url"` + Synopsis *string `json:"synopsis"` +} diff --git a/pkg/scraper/performer.go b/pkg/scraper/performer.go new file mode 100644 index 000000000..f97250736 --- /dev/null +++ b/pkg/scraper/performer.go @@ -0,0 +1,27 @@ +package scraper + +type ScrapedPerformerInput struct { + // Set if performer matched + StoredID *string `json:"stored_id"` + Name *string `json:"name"` + Gender *string `json:"gender"` + URL *string `json:"url"` + Twitter *string `json:"twitter"` + Instagram *string `json:"instagram"` + Birthdate *string `json:"birthdate"` + Ethnicity *string `json:"ethnicity"` + Country *string `json:"country"` + EyeColor *string `json:"eye_color"` + Height *string `json:"height"` + Measurements *string `json:"measurements"` + FakeTits *string `json:"fake_tits"` + CareerLength *string `json:"career_length"` + Tattoos *string `json:"tattoos"` + Piercings *string `json:"piercings"` + Aliases *string `json:"aliases"` + Details *string `json:"details"` + DeathDate *string `json:"death_date"` + HairColor *string `json:"hair_color"` + Weight *string `json:"weight"` + RemoteSiteID *string `json:"remote_site_id"` +} diff --git a/pkg/scraper/postprocessing.go 
b/pkg/scraper/postprocessing.go index 731769310..4151602d2 100644 --- a/pkg/scraper/postprocessing.go +++ b/pkg/scraper/postprocessing.go @@ -6,12 +6,14 @@ import ( "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/match" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/tag" + "github.com/stashapp/stash/pkg/txn" ) // postScrape handles post-processing of scraped content. If the content // requires post-processing, this function fans out to the given content // type and post-processes it. -func (c Cache) postScrape(ctx context.Context, content models.ScrapedContent) (models.ScrapedContent, error) { +func (c Cache) postScrape(ctx context.Context, content ScrapedContent) (ScrapedContent, error) { // Analyze the concrete type, call the right post-processing function switch v := content.(type) { case *models.ScrapedPerformer: @@ -20,17 +22,17 @@ func (c Cache) postScrape(ctx context.Context, content models.ScrapedContent) (m } case models.ScrapedPerformer: return c.postScrapePerformer(ctx, v) - case *models.ScrapedScene: + case *ScrapedScene: if v != nil { return c.postScrapeScene(ctx, *v) } - case models.ScrapedScene: + case ScrapedScene: return c.postScrapeScene(ctx, v) - case *models.ScrapedGallery: + case *ScrapedGallery: if v != nil { return c.postScrapeGallery(ctx, *v) } - case models.ScrapedGallery: + case ScrapedGallery: return c.postScrapeGallery(ctx, v) case *models.ScrapedMovie: if v != nil { @@ -44,11 +46,11 @@ func (c Cache) postScrape(ctx context.Context, content models.ScrapedContent) (m return content, nil } -func (c Cache) postScrapePerformer(ctx context.Context, p models.ScrapedPerformer) (models.ScrapedContent, error) { - if err := c.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - tqb := r.Tag() +func (c Cache) postScrapePerformer(ctx context.Context, p models.ScrapedPerformer) (ScrapedContent, error) { + if err := txn.WithTxn(ctx, c.txnManager, func(ctx context.Context) error { + tqb := c.repository.TagFinder - tags, err := postProcessTags(tqb, p.Tags) + tags, err := postProcessTags(ctx, tqb, p.Tags) if err != nil { return err } @@ -67,10 +69,10 @@ func (c Cache) postScrapePerformer(ctx context.Context, p models.ScrapedPerforme return p, nil } -func (c Cache) postScrapeMovie(ctx context.Context, m models.ScrapedMovie) (models.ScrapedContent, error) { +func (c Cache) postScrapeMovie(ctx context.Context, m models.ScrapedMovie) (ScrapedContent, error) { if m.Studio != nil { - if err := c.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - return match.ScrapedStudio(r.Studio(), m.Studio, nil) + if err := txn.WithTxn(ctx, c.txnManager, func(ctx context.Context) error { + return match.ScrapedStudio(ctx, c.repository.StudioFinder, m.Studio, nil) }); err != nil { return nil, err } @@ -88,29 +90,23 @@ func (c Cache) postScrapeMovie(ctx context.Context, m models.ScrapedMovie) (mode } func (c Cache) postScrapeScenePerformer(ctx context.Context, p models.ScrapedPerformer) error { - if err := c.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - tqb := r.Tag() + tqb := c.repository.TagFinder - tags, err := postProcessTags(tqb, p.Tags) - if err != nil { - return err - } - p.Tags = tags - - return nil - }); err != nil { + tags, err := postProcessTags(ctx, tqb, p.Tags) + if err != nil { return err } + p.Tags = tags return nil } -func (c Cache) postScrapeScene(ctx context.Context, scene models.ScrapedScene) (models.ScrapedContent, error) { - if err := c.txnManager.WithReadTxn(ctx, func(r 
models.ReaderRepository) error { - pqb := r.Performer() - mqb := r.Movie() - tqb := r.Tag() - sqb := r.Studio() +func (c Cache) postScrapeScene(ctx context.Context, scene ScrapedScene) (ScrapedContent, error) { + if err := txn.WithTxn(ctx, c.txnManager, func(ctx context.Context) error { + pqb := c.repository.PerformerFinder + mqb := c.repository.MovieFinder + tqb := c.repository.TagFinder + sqb := c.repository.StudioFinder for _, p := range scene.Performers { if p == nil { @@ -121,26 +117,26 @@ func (c Cache) postScrapeScene(ctx context.Context, scene models.ScrapedScene) ( return err } - if err := match.ScrapedPerformer(pqb, p, nil); err != nil { + if err := match.ScrapedPerformer(ctx, pqb, p, nil); err != nil { return err } } for _, p := range scene.Movies { - err := match.ScrapedMovie(mqb, p) + err := match.ScrapedMovie(ctx, mqb, p) if err != nil { return err } } - tags, err := postProcessTags(tqb, scene.Tags) + tags, err := postProcessTags(ctx, tqb, scene.Tags) if err != nil { return err } scene.Tags = tags if scene.Studio != nil { - err := match.ScrapedStudio(sqb, scene.Studio, nil) + err := match.ScrapedStudio(ctx, sqb, scene.Studio, nil) if err != nil { return err } @@ -159,27 +155,27 @@ func (c Cache) postScrapeScene(ctx context.Context, scene models.ScrapedScene) ( return scene, nil } -func (c Cache) postScrapeGallery(ctx context.Context, g models.ScrapedGallery) (models.ScrapedContent, error) { - if err := c.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - pqb := r.Performer() - tqb := r.Tag() - sqb := r.Studio() +func (c Cache) postScrapeGallery(ctx context.Context, g ScrapedGallery) (ScrapedContent, error) { + if err := txn.WithTxn(ctx, c.txnManager, func(ctx context.Context) error { + pqb := c.repository.PerformerFinder + tqb := c.repository.TagFinder + sqb := c.repository.StudioFinder for _, p := range g.Performers { - err := match.ScrapedPerformer(pqb, p, nil) + err := match.ScrapedPerformer(ctx, pqb, p, nil) if err != nil { return err } } - tags, err := postProcessTags(tqb, g.Tags) + tags, err := postProcessTags(ctx, tqb, g.Tags) if err != nil { return err } g.Tags = tags if g.Studio != nil { - err := match.ScrapedStudio(sqb, g.Studio, nil) + err := match.ScrapedStudio(ctx, sqb, g.Studio, nil) if err != nil { return err } @@ -193,11 +189,11 @@ func (c Cache) postScrapeGallery(ctx context.Context, g models.ScrapedGallery) ( return g, nil } -func postProcessTags(tqb models.TagReader, scrapedTags []*models.ScrapedTag) ([]*models.ScrapedTag, error) { +func postProcessTags(ctx context.Context, tqb tag.Queryer, scrapedTags []*models.ScrapedTag) ([]*models.ScrapedTag, error) { var ret []*models.ScrapedTag for _, t := range scrapedTags { - err := match.ScrapedTag(tqb, t) + err := match.ScrapedTag(ctx, tqb, t) if err != nil { return nil, err } diff --git a/pkg/scraper/query_url.go b/pkg/scraper/query_url.go index 2826e15e4..d3bad0781 100644 --- a/pkg/scraper/query_url.go +++ b/pkg/scraper/query_url.go @@ -13,15 +13,20 @@ type queryURLParameters map[string]string func queryURLParametersFromScene(scene *models.Scene) queryURLParameters { ret := make(queryURLParameters) - ret["checksum"] = scene.Checksum.String - ret["oshash"] = scene.OSHash.String + ret["checksum"] = scene.Checksum + ret["oshash"] = scene.OSHash ret["filename"] = filepath.Base(scene.Path) - ret["title"] = scene.Title.String - ret["url"] = scene.URL.String + + if scene.Title != "" { + ret["title"] = scene.Title + } + if scene.URL != "" { + ret["url"] = scene.URL + } return ret } -func 
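postProcessTags now receives the context and a narrow tag.Queryer rather than the full models.TagReader. As match.ScrapedTag is used throughout this diff, it resolves each scraped tag against the store — filling in its stored ID where a match exists, on my reading. A small sketch, with tqb a hypothetical tag.Queryer:

```go
scraped := []*models.ScrapedTag{{Name: "outdoor"}}
matched, err := postProcessTags(ctx, tqb, scraped)
if err != nil {
	return err
}
_ = matched // same tags, now resolved against the database where possible
```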
queryURLParametersFromScrapedScene(scene models.ScrapedSceneInput) queryURLParameters { +func queryURLParametersFromScrapedScene(scene ScrapedSceneInput) queryURLParameters { ret := make(queryURLParameters) setField := func(field string, value *string) { @@ -46,13 +51,18 @@ func queryURLParameterFromURL(url string) queryURLParameters { func queryURLParametersFromGallery(gallery *models.Gallery) queryURLParameters { ret := make(queryURLParameters) - ret["checksum"] = gallery.Checksum + ret["checksum"] = gallery.Checksum() - if gallery.Path.Valid { - ret["filename"] = filepath.Base(gallery.Path.String) + if gallery.Path != "" { + ret["filename"] = filepath.Base(gallery.Path) + } + if gallery.Title != "" { + ret["title"] = gallery.Title + } + + if gallery.URL != "" { + ret["url"] = gallery.URL } - ret["title"] = gallery.Title.String - ret["url"] = gallery.URL.String return ret } diff --git a/pkg/scraper/scene.go b/pkg/scraper/scene.go new file mode 100644 index 000000000..9b5a60191 --- /dev/null +++ b/pkg/scraper/scene.go @@ -0,0 +1,32 @@ +package scraper + +import ( + "github.com/stashapp/stash/pkg/models" +) + +type ScrapedScene struct { + Title *string `json:"title"` + Details *string `json:"details"` + URL *string `json:"url"` + Date *string `json:"date"` + // This should be a base64 encoded data URL + Image *string `json:"image"` + File *models.SceneFileType `json:"file"` + Studio *models.ScrapedStudio `json:"studio"` + Tags []*models.ScrapedTag `json:"tags"` + Performers []*models.ScrapedPerformer `json:"performers"` + Movies []*models.ScrapedMovie `json:"movies"` + RemoteSiteID *string `json:"remote_site_id"` + Duration *int `json:"duration"` + Fingerprints []*models.StashBoxFingerprint `json:"fingerprints"` +} + +func (ScrapedScene) IsScrapedContent() {} + +type ScrapedSceneInput struct { + Title *string `json:"title"` + Details *string `json:"details"` + URL *string `json:"url"` + Date *string `json:"date"` + RemoteSiteID *string `json:"remote_site_id"` +} diff --git a/pkg/scraper/scraper.go b/pkg/scraper/scraper.go index 3a2fcc054..67569c18b 100644 --- a/pkg/scraper/scraper.go +++ b/pkg/scraper/scraper.go @@ -3,11 +3,139 @@ package scraper import ( "context" "errors" + "fmt" + "io" "net/http" + "strconv" "github.com/stashapp/stash/pkg/models" ) +type Source struct { + // Index of the configured stash-box instance to use. Should be unset if scraper_id is set + StashBoxIndex *int `json:"stash_box_index"` + // Stash-box endpoint + StashBoxEndpoint *string `json:"stash_box_endpoint"` + // Scraper ID to scrape with. 
Should be unset if stash_box_index is set + ScraperID *string `json:"scraper_id"` +} + +// Scraped Content is the forming union over the different scrapers +type ScrapedContent interface { + IsScrapedContent() +} + +// Type of the content a scraper generates +type ScrapeContentType string + +const ( + ScrapeContentTypeGallery ScrapeContentType = "GALLERY" + ScrapeContentTypeMovie ScrapeContentType = "MOVIE" + ScrapeContentTypePerformer ScrapeContentType = "PERFORMER" + ScrapeContentTypeScene ScrapeContentType = "SCENE" +) + +var AllScrapeContentType = []ScrapeContentType{ + ScrapeContentTypeGallery, + ScrapeContentTypeMovie, + ScrapeContentTypePerformer, + ScrapeContentTypeScene, +} + +func (e ScrapeContentType) IsValid() bool { + switch e { + case ScrapeContentTypeGallery, ScrapeContentTypeMovie, ScrapeContentTypePerformer, ScrapeContentTypeScene: + return true + } + return false +} + +func (e ScrapeContentType) String() string { + return string(e) +} + +func (e *ScrapeContentType) UnmarshalGQL(v interface{}) error { + str, ok := v.(string) + if !ok { + return fmt.Errorf("enums must be strings") + } + + *e = ScrapeContentType(str) + if !e.IsValid() { + return fmt.Errorf("%s is not a valid ScrapeContentType", str) + } + return nil +} + +func (e ScrapeContentType) MarshalGQL(w io.Writer) { + fmt.Fprint(w, strconv.Quote(e.String())) +} + +type Scraper struct { + ID string `json:"id"` + Name string `json:"name"` + // Details for performer scraper + Performer *ScraperSpec `json:"performer"` + // Details for scene scraper + Scene *ScraperSpec `json:"scene"` + // Details for gallery scraper + Gallery *ScraperSpec `json:"gallery"` + // Details for movie scraper + Movie *ScraperSpec `json:"movie"` +} + +type ScraperSpec struct { + // URLs matching these can be scraped with + Urls []string `json:"urls"` + SupportedScrapes []ScrapeType `json:"supported_scrapes"` +} + +type ScrapeType string + +const ( + // From text query + ScrapeTypeName ScrapeType = "NAME" + // From existing object + ScrapeTypeFragment ScrapeType = "FRAGMENT" + // From URL + ScrapeTypeURL ScrapeType = "URL" +) + +var AllScrapeType = []ScrapeType{ + ScrapeTypeName, + ScrapeTypeFragment, + ScrapeTypeURL, +} + +func (e ScrapeType) IsValid() bool { + switch e { + case ScrapeTypeName, ScrapeTypeFragment, ScrapeTypeURL: + return true + } + return false +} + +func (e ScrapeType) String() string { + return string(e) +} + +func (e *ScrapeType) UnmarshalGQL(v interface{}) error { + str, ok := v.(string) + if !ok { + return fmt.Errorf("enums must be strings") + } + + *e = ScrapeType(str) + if !e.IsValid() { + return fmt.Errorf("%s is not a valid ScrapeType", str) + } + return nil +} + +func (e ScrapeType) MarshalGQL(w io.Writer) { + fmt.Fprint(w, strconv.Quote(e.String())) +} + var ( // ErrMaxRedirects is returned if the max number of HTTP redirects are reached. ErrMaxRedirects = errors.New("maximum number of HTTP redirects reached") @@ -24,9 +152,9 @@ var ( // The system expects one of these to be set, and the remaining to be // set to nil. 
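ScrapeContentType and ScrapeType were previously gqlgen-generated enums in the models package; moving them here means hand-maintaining the MarshalGQL/UnmarshalGQL hooks shown above. A round-trip sketch of those hooks (bytes and fmt imports assumed):

```go
var ty ScrapeContentType
if err := ty.UnmarshalGQL("SCENE"); err != nil {
	return err // e.g. `FOO is not a valid ScrapeContentType`
}

var buf bytes.Buffer
ty.MarshalGQL(&buf)
fmt.Println(buf.String()) // prints "SCENE" — quoted, via strconv.Quote
```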
type Input struct { - Performer *models.ScrapedPerformerInput - Scene *models.ScrapedSceneInput - Gallery *models.ScrapedGalleryInput + Performer *ScrapedPerformerInput + Scene *ScrapedSceneInput + Gallery *ScrapedGalleryInput } // simple type definitions that can help customize @@ -41,32 +169,32 @@ const ( // scraper is the generic interface to the scraper subsystems type scraper interface { // spec returns the scraper specification, suitable for graphql - spec() models.Scraper + spec() Scraper // supports tests if the scraper supports a given content type - supports(models.ScrapeContentType) bool + supports(ScrapeContentType) bool // supportsURL tests if the scraper supports scrapes of a given url, producing a given content type - supportsURL(url string, ty models.ScrapeContentType) bool + supportsURL(url string, ty ScrapeContentType) bool } // urlScraper is the interface of scrapers supporting url loads type urlScraper interface { scraper - viaURL(ctx context.Context, client *http.Client, url string, ty models.ScrapeContentType) (models.ScrapedContent, error) + viaURL(ctx context.Context, client *http.Client, url string, ty ScrapeContentType) (ScrapedContent, error) } // nameScraper is the interface of scrapers supporting name loads type nameScraper interface { scraper - viaName(ctx context.Context, client *http.Client, name string, ty models.ScrapeContentType) ([]models.ScrapedContent, error) + viaName(ctx context.Context, client *http.Client, name string, ty ScrapeContentType) ([]ScrapedContent, error) } // fragmentScraper is the interface of scrapers supporting fragment loads type fragmentScraper interface { scraper - viaFragment(ctx context.Context, client *http.Client, input Input) (models.ScrapedContent, error) + viaFragment(ctx context.Context, client *http.Client, input Input) (ScrapedContent, error) } // sceneScraper is a scraper which supports scene scrapes with @@ -74,7 +202,7 @@ type fragmentScraper interface { type sceneScraper interface { scraper - viaScene(ctx context.Context, client *http.Client, scene *models.Scene) (*models.ScrapedScene, error) + viaScene(ctx context.Context, client *http.Client, scene *models.Scene) (*ScrapedScene, error) } // galleryScraper is a scraper which supports gallery scrapes with @@ -82,5 +210,5 @@ type sceneScraper interface { type galleryScraper interface { scraper - viaGallery(ctx context.Context, client *http.Client, gallery *models.Gallery) (*models.ScrapedGallery, error) + viaGallery(ctx context.Context, client *http.Client, gallery *models.Gallery) (*ScrapedGallery, error) } diff --git a/pkg/scraper/script.go b/pkg/scraper/script.go index 4a9074fc2..5d6e25acf 100644 --- a/pkg/scraper/script.go +++ b/pkg/scraper/script.go @@ -125,13 +125,13 @@ func (s *scriptScraper) runScraperScript(ctx context.Context, inString string, o return nil } -func (s *scriptScraper) scrapeByName(ctx context.Context, name string, ty models.ScrapeContentType) ([]models.ScrapedContent, error) { +func (s *scriptScraper) scrapeByName(ctx context.Context, name string, ty ScrapeContentType) ([]ScrapedContent, error) { input := `{"name": "` + name + `"}` - var ret []models.ScrapedContent + var ret []ScrapedContent var err error switch ty { - case models.ScrapeContentTypePerformer: + case ScrapeContentTypePerformer: var performers []models.ScrapedPerformer err = s.runScraperScript(ctx, input, &performers) if err == nil { @@ -140,8 +140,8 @@ func (s *scriptScraper) scrapeByName(ctx context.Context, name string, ty models ret = append(ret, &v) } } - case 
models.ScrapeContentTypeScene: - var scenes []models.ScrapedScene + case ScrapeContentTypeScene: + var scenes []ScrapedScene err = s.runScraperScript(ctx, input, &scenes) if err == nil { for _, s := range scenes { @@ -156,20 +156,20 @@ func (s *scriptScraper) scrapeByName(ctx context.Context, name string, ty models return ret, err } -func (s *scriptScraper) scrapeByFragment(ctx context.Context, input Input) (models.ScrapedContent, error) { +func (s *scriptScraper) scrapeByFragment(ctx context.Context, input Input) (ScrapedContent, error) { var inString []byte var err error - var ty models.ScrapeContentType + var ty ScrapeContentType switch { case input.Performer != nil: inString, err = json.Marshal(*input.Performer) - ty = models.ScrapeContentTypePerformer + ty = ScrapeContentTypePerformer case input.Gallery != nil: inString, err = json.Marshal(*input.Gallery) - ty = models.ScrapeContentTypeGallery + ty = ScrapeContentTypeGallery case input.Scene != nil: inString, err = json.Marshal(*input.Scene) - ty = models.ScrapeContentTypeScene + ty = ScrapeContentTypeScene } if err != nil { @@ -179,25 +179,25 @@ func (s *scriptScraper) scrapeByFragment(ctx context.Context, input Input) (mode return s.scrape(ctx, string(inString), ty) } -func (s *scriptScraper) scrapeByURL(ctx context.Context, url string, ty models.ScrapeContentType) (models.ScrapedContent, error) { +func (s *scriptScraper) scrapeByURL(ctx context.Context, url string, ty ScrapeContentType) (ScrapedContent, error) { return s.scrape(ctx, `{"url": "`+url+`"}`, ty) } -func (s *scriptScraper) scrape(ctx context.Context, input string, ty models.ScrapeContentType) (models.ScrapedContent, error) { +func (s *scriptScraper) scrape(ctx context.Context, input string, ty ScrapeContentType) (ScrapedContent, error) { switch ty { - case models.ScrapeContentTypePerformer: + case ScrapeContentTypePerformer: var performer *models.ScrapedPerformer err := s.runScraperScript(ctx, input, &performer) return performer, err - case models.ScrapeContentTypeGallery: - var gallery *models.ScrapedGallery + case ScrapeContentTypeGallery: + var gallery *ScrapedGallery err := s.runScraperScript(ctx, input, &gallery) return gallery, err - case models.ScrapeContentTypeScene: - var scene *models.ScrapedScene + case ScrapeContentTypeScene: + var scene *ScrapedScene err := s.runScraperScript(ctx, input, &scene) return scene, err - case models.ScrapeContentTypeMovie: + case ScrapeContentTypeMovie: var movie *models.ScrapedMovie err := s.runScraperScript(ctx, input, &movie) return movie, err @@ -206,28 +206,28 @@ func (s *scriptScraper) scrape(ctx context.Context, input string, ty models.Scra return nil, ErrNotSupported } -func (s *scriptScraper) scrapeSceneByScene(ctx context.Context, scene *models.Scene) (*models.ScrapedScene, error) { +func (s *scriptScraper) scrapeSceneByScene(ctx context.Context, scene *models.Scene) (*ScrapedScene, error) { inString, err := json.Marshal(sceneToUpdateInput(scene)) if err != nil { return nil, err } - var ret *models.ScrapedScene + var ret *ScrapedScene err = s.runScraperScript(ctx, string(inString), &ret) return ret, err } -func (s *scriptScraper) scrapeGalleryByGallery(ctx context.Context, gallery *models.Gallery) (*models.ScrapedGallery, error) { +func (s *scriptScraper) scrapeGalleryByGallery(ctx context.Context, gallery *models.Gallery) (*ScrapedGallery, error) { inString, err := json.Marshal(galleryToUpdateInput(gallery)) if err != nil { return nil, err } - var ret *models.ScrapedGallery + var ret *ScrapedGallery err = 
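Worth noting, though unchanged by this diff: scrapeByName and scrapeByURL build their JSON input by string concatenation, so a name or URL containing a double quote would yield invalid JSON for the script. A defensive alternative — an assumption about how one might harden this, not what the code does:

```go
b, err := json.Marshal(map[string]string{"url": url})
if err != nil {
	return nil, err
}
return s.scrape(ctx, string(b), ty)
```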
s.runScraperScript(ctx, string(inString), &ret) diff --git a/pkg/scraper/stash.go b/pkg/scraper/stash.go index 8193f2a67..874e18952 100644 --- a/pkg/scraper/stash.go +++ b/pkg/scraper/stash.go @@ -2,7 +2,6 @@ package scraper import ( "context" - "database/sql" "fmt" "net/http" "strconv" @@ -18,16 +17,14 @@ type stashScraper struct { config config globalConfig GlobalConfig client *http.Client - txnManager models.TransactionManager } -func newStashScraper(scraper scraperTypeConfig, client *http.Client, txnManager models.TransactionManager, config config, globalConfig GlobalConfig) *stashScraper { +func newStashScraper(scraper scraperTypeConfig, client *http.Client, config config, globalConfig GlobalConfig) *stashScraper { return &stashScraper{ scraper: scraper, config: config, client: client, globalConfig: globalConfig, - txnManager: txnManager, } } @@ -83,7 +80,7 @@ type scrapedPerformerStash struct { Weight *string `graphql:"weight" json:"weight"` } -func (s *stashScraper) scrapeByFragment(ctx context.Context, input Input) (models.ScrapedContent, error) { +func (s *stashScraper) scrapeByFragment(ctx context.Context, input Input) (ScrapedContent, error) { if input.Gallery != nil || input.Scene != nil { return nil, fmt.Errorf("%w: using stash scraper as a fragment scraper", ErrNotSupported) } @@ -138,8 +135,8 @@ type stashFindSceneNamesResultType struct { Scenes []*scrapedSceneStash `graphql:"scenes"` } -func (s *stashScraper) scrapedStashSceneToScrapedScene(ctx context.Context, scene *scrapedSceneStash) (*models.ScrapedScene, error) { - ret := models.ScrapedScene{} +func (s *stashScraper) scrapedStashSceneToScrapedScene(ctx context.Context, scene *scrapedSceneStash) (*ScrapedScene, error) { + ret := ScrapedScene{} err := copier.Copy(&ret, scene) if err != nil { return nil, err @@ -154,7 +151,7 @@ func (s *stashScraper) scrapedStashSceneToScrapedScene(ctx context.Context, scen return &ret, nil } -func (s *stashScraper) scrapeByName(ctx context.Context, name string, ty models.ScrapeContentType) ([]models.ScrapedContent, error) { +func (s *stashScraper) scrapeByName(ctx context.Context, name string, ty ScrapeContentType) ([]ScrapedContent, error) { client := s.getStashClient() page := 1 @@ -168,9 +165,9 @@ func (s *stashScraper) scrapeByName(ctx context.Context, name string, ty models. }, } - var ret []models.ScrapedContent + var ret []ScrapedContent switch ty { - case models.ScrapeContentTypeScene: + case ScrapeContentTypeScene: var q struct { FindScenes stashFindSceneNamesResultType `graphql:"findScenes(filter: $f)"` } @@ -189,7 +186,7 @@ func (s *stashScraper) scrapeByName(ctx context.Context, name string, ty models. 
} return ret, nil - case models.ScrapeContentTypePerformer: + case ScrapeContentTypePerformer: var q struct { FindPerformers stashFindPerformerNamesResultType `graphql:"findPerformers(filter: $f)"` } @@ -221,7 +218,7 @@ type scrapedSceneStash struct { Performers []*scrapedPerformerStash `graphql:"performers" json:"performers"` } -func (s *stashScraper) scrapeSceneByScene(ctx context.Context, scene *models.Scene) (*models.ScrapedScene, error) { +func (s *stashScraper) scrapeSceneByScene(ctx context.Context, scene *models.Scene) (*ScrapedScene, error) { // query by MD5 var q struct { FindScene *scrapedSceneStash `graphql:"findSceneByHash(input: $c)"` @@ -232,9 +229,12 @@ func (s *stashScraper) scrapeSceneByScene(ctx context.Context, scene *models.Sce Oshash *string `graphql:"oshash" json:"oshash"` } + checksum := scene.Checksum + oshash := scene.OSHash + input := SceneHashInput{ - Checksum: &scene.Checksum.String, - Oshash: &scene.OSHash.String, + Checksum: &checksum, + Oshash: &oshash, } vars := map[string]interface{}{ @@ -273,7 +273,7 @@ type scrapedGalleryStash struct { Performers []*scrapedPerformerStash `graphql:"performers" json:"performers"` } -func (s *stashScraper) scrapeGalleryByGallery(ctx context.Context, gallery *models.Gallery) (*models.ScrapedGallery, error) { +func (s *stashScraper) scrapeGalleryByGallery(ctx context.Context, gallery *models.Gallery) (*ScrapedGallery, error) { var q struct { FindGallery *scrapedGalleryStash `graphql:"findGalleryByHash(input: $c)"` } @@ -282,8 +282,9 @@ func (s *stashScraper) scrapeGalleryByGallery(ctx context.Context, gallery *mode Checksum *string `graphql:"checksum" json:"checksum"` } + checksum := gallery.Checksum() input := GalleryHashInput{ - Checksum: &gallery.Checksum, + Checksum: &checksum, } vars := map[string]interface{}{ @@ -296,7 +297,7 @@ func (s *stashScraper) scrapeGalleryByGallery(ctx context.Context, gallery *mode } // need to copy back to a scraped scene - ret := models.ScrapedGallery{} + ret := ScrapedGallery{} if err := copier.Copy(&ret, q.FindGallery); err != nil { return nil, err } @@ -304,34 +305,15 @@ func (s *stashScraper) scrapeGalleryByGallery(ctx context.Context, gallery *mode return &ret, nil } -func (s *stashScraper) scrapeByURL(_ context.Context, _ string, _ models.ScrapeContentType) (models.ScrapedContent, error) { +func (s *stashScraper) scrapeByURL(_ context.Context, _ string, _ ScrapeContentType) (ScrapedContent, error) { return nil, ErrNotSupported } -func getScene(ctx context.Context, sceneID int, txnManager models.TransactionManager) (*models.Scene, error) { - var ret *models.Scene - if err := txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - var err error - ret, err = r.Scene().Find(sceneID) - return err - }); err != nil { - return nil, err - } - return ret, nil -} - func sceneToUpdateInput(scene *models.Scene) models.SceneUpdateInput { - toStringPtr := func(s sql.NullString) *string { - if s.Valid { - return &s.String - } - - return nil - } - - dateToStringPtr := func(s models.SQLiteDate) *string { - if s.Valid { - return &s.String + dateToStringPtr := func(s *models.Date) *string { + if s != nil { + v := s.String() + return &v } return nil @@ -339,37 +321,18 @@ func sceneToUpdateInput(scene *models.Scene) models.SceneUpdateInput { return models.SceneUpdateInput{ ID: strconv.Itoa(scene.ID), - Title: toStringPtr(scene.Title), - Details: toStringPtr(scene.Details), - URL: toStringPtr(scene.URL), + Title: &scene.Title, + Details: &scene.Details, + URL: &scene.URL, Date: 
dateToStringPtr(scene.Date), } } -func getGallery(ctx context.Context, galleryID int, txnManager models.TransactionManager) (*models.Gallery, error) { - var ret *models.Gallery - if err := txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - var err error - ret, err = r.Gallery().Find(galleryID) - return err - }); err != nil { - return nil, err - } - return ret, nil -} - func galleryToUpdateInput(gallery *models.Gallery) models.GalleryUpdateInput { - toStringPtr := func(s sql.NullString) *string { - if s.Valid { - return &s.String - } - - return nil - } - - dateToStringPtr := func(s models.SQLiteDate) *string { - if s.Valid { - return &s.String + dateToStringPtr := func(s *models.Date) *string { + if s != nil { + v := s.String() + return &v } return nil @@ -377,9 +340,9 @@ func galleryToUpdateInput(gallery *models.Gallery) models.GalleryUpdateInput { return models.GalleryUpdateInput{ ID: strconv.Itoa(gallery.ID), - Title: toStringPtr(gallery.Title), - Details: toStringPtr(gallery.Details), - URL: toStringPtr(gallery.URL), + Title: &gallery.Title, + Details: &gallery.Details, + URL: &gallery.URL, Date: dateToStringPtr(gallery.Date), } } diff --git a/pkg/scraper/stashbox/models.go b/pkg/scraper/stashbox/models.go new file mode 100644 index 000000000..60ef5b028 --- /dev/null +++ b/pkg/scraper/stashbox/models.go @@ -0,0 +1,8 @@ +package stashbox + +import "github.com/stashapp/stash/pkg/models" + +type StashBoxPerformerQueryResult struct { + Query string `json:"query"` + Results []*models.ScrapedPerformer `json:"results"` +} diff --git a/pkg/scraper/stashbox/stash_box.go b/pkg/scraper/stashbox/stash_box.go index abcadc0f2..5133bb172 100644 --- a/pkg/scraper/stashbox/stash_box.go +++ b/pkg/scraper/stashbox/stash_box.go @@ -17,24 +17,61 @@ import ( "golang.org/x/text/language" "github.com/Yamashou/gqlgenc/graphqljson" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/fsutil" "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/match" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/scraper" "github.com/stashapp/stash/pkg/scraper/stashbox/graphql" "github.com/stashapp/stash/pkg/sliceutil/stringslice" + "github.com/stashapp/stash/pkg/studio" + "github.com/stashapp/stash/pkg/tag" + "github.com/stashapp/stash/pkg/txn" "github.com/stashapp/stash/pkg/utils" ) +type SceneReader interface { + Find(ctx context.Context, id int) (*models.Scene, error) + models.StashIDLoader + models.VideoFileLoader +} + +type PerformerReader interface { + match.PerformerFinder + Find(ctx context.Context, id int) (*models.Performer, error) + FindBySceneID(ctx context.Context, sceneID int) ([]*models.Performer, error) + models.StashIDLoader + GetImage(ctx context.Context, performerID int) ([]byte, error) +} + +type StudioReader interface { + match.StudioFinder + studio.Finder + models.StashIDLoader +} +type TagFinder interface { + tag.Queryer + FindBySceneID(ctx context.Context, sceneID int) ([]*models.Tag, error) +} + +type Repository struct { + Scene SceneReader + Performer PerformerReader + Tag TagFinder + Studio StudioReader +} + // Client represents the client interface to a stash-box server instance. type Client struct { client *graphql.Client - txnManager models.TransactionManager + txnManager txn.Manager + repository Repository box models.StashBox } // NewClient returns a new instance of a stash-box client. 
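The stash-box client now depends on a txn.Manager plus a Repository of narrow, per-entity reader interfaces (declared above) instead of a monolithic models.TransactionManager. A minimal wiring sketch, assuming the caller owns concrete stores satisfying those interfaces; the helper below is hypothetical and only the stashbox types come from this change:

package main

import (
	"github.com/stashapp/stash/pkg/models"
	"github.com/stashapp/stash/pkg/scraper/stashbox"
	"github.com/stashapp/stash/pkg/txn"
)

// newStashBoxClient assembles a stashbox.Repository from whatever concrete
// stores the caller owns and hands it to stashbox.NewClient (diffed below).
func newStashBoxClient(
	box models.StashBox,
	mgr txn.Manager,
	scenes stashbox.SceneReader,
	performers stashbox.PerformerReader,
	tags stashbox.TagFinder,
	studios stashbox.StudioReader,
) *stashbox.Client {
	repo := stashbox.Repository{
		Scene:     scenes,
		Performer: performers,
		Tag:       tags,
		Studio:    studios,
	}
	return stashbox.NewClient(box, mgr, repo)
}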
-func NewClient(box models.StashBox, txnManager models.TransactionManager) *Client { +func NewClient(box models.StashBox, txnManager txn.Manager, repo Repository) *Client { authHeader := func(req *http.Request) { req.Header.Set("ApiKey", box.APIKey) } @@ -46,6 +83,7 @@ func NewClient(box models.StashBox, txnManager models.TransactionManager) *Clien return &Client{ client: client, txnManager: txnManager, + repository: repo, box: box, } } @@ -55,7 +93,7 @@ func (c Client) getHTTPClient() *http.Client { } // QueryStashBoxScene queries stash-box for scenes using a query string. -func (c Client) QueryStashBoxScene(ctx context.Context, queryStr string) ([]*models.ScrapedScene, error) { +func (c Client) QueryStashBoxScene(ctx context.Context, queryStr string) ([]*scraper.ScrapedScene, error) { scenes, err := c.client.SearchScene(ctx, queryStr) if err != nil { return nil, err @@ -63,7 +101,7 @@ func (c Client) QueryStashBoxScene(ctx context.Context, queryStr string) ([]*mod sceneFragments := scenes.SearchScene - var ret []*models.ScrapedScene + var ret []*scraper.ScrapedScene for _, s := range sceneFragments { ss, err := c.sceneFragmentToScrapedScene(ctx, s) if err != nil { @@ -77,7 +115,7 @@ func (c Client) QueryStashBoxScene(ctx context.Context, queryStr string) ([]*mod // FindStashBoxScenesByFingerprints queries stash-box for a scene using the // scene's MD5/OSHASH checksum, or PHash. -func (c Client) FindStashBoxSceneByFingerprints(ctx context.Context, sceneID int) ([]*models.ScrapedScene, error) { +func (c Client) FindStashBoxSceneByFingerprints(ctx context.Context, sceneID int) ([]*scraper.ScrapedScene, error) { res, err := c.FindStashBoxScenesByFingerprints(ctx, []int{sceneID}) if len(res) > 0 { return res[0], err @@ -88,14 +126,14 @@ func (c Client) FindStashBoxSceneByFingerprints(ctx context.Context, sceneID int // FindStashBoxScenesByFingerprints queries stash-box for scenes using every // scene's MD5/OSHASH checksum, or PHash, and returns results in the same order // as the input slice. 
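The fingerprint hunks below replace nullable scene columns (scene.Checksum, scene.OSHash, scene.Phash) with per-file lookups such as f.Fingerprints.GetString(file.FingerprintTypeMD5). As a reading aid, a sketch of the accessor semantics those calls assume; this is an assumption about pkg/file, not code from this change:

package file // sketch only: assumed accessor behaviour

// Fingerprint pairs a fingerprint type with a loosely-typed value
// (string for md5/oshash, int64 for phash).
type Fingerprint struct {
	Type        string
	Fingerprint interface{}
}

type Fingerprints []Fingerprint

// GetString returns the fingerprint of the given type as a string,
// or "" when it is absent or not a string.
func (f Fingerprints) GetString(typ string) string {
	for _, fp := range f {
		if fp.Type == typ {
			s, _ := fp.Fingerprint.(string)
			return s
		}
	}
	return ""
}

// GetInt64 does the same for integer fingerprints such as phash,
// returning 0 when absent.
func (f Fingerprints) GetInt64(typ string) int64 {
	for _, fp := range f {
		if fp.Type == typ {
			v, _ := fp.Fingerprint.(int64)
			return v
		}
	}
	return 0
}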
-func (c Client) FindStashBoxScenesByFingerprints(ctx context.Context, ids []int) ([][]*models.ScrapedScene, error) { +func (c Client) FindStashBoxScenesByFingerprints(ctx context.Context, ids []int) ([][]*scraper.ScrapedScene, error) { var fingerprints [][]*graphql.FingerprintQueryInput - if err := c.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - qb := r.Scene() + if err := txn.WithTxn(ctx, c.txnManager, func(ctx context.Context) error { + qb := c.repository.Scene for _, sceneID := range ids { - scene, err := qb.Find(sceneID) + scene, err := qb.Find(ctx, sceneID) if err != nil { return err } @@ -104,28 +142,37 @@ func (c Client) FindStashBoxScenesByFingerprints(ctx context.Context, ids []int) return fmt.Errorf("scene with id %d not found", sceneID) } + if err := scene.LoadFiles(ctx, c.repository.Scene); err != nil { + return err + } + var sceneFPs []*graphql.FingerprintQueryInput - if scene.Checksum.Valid { - sceneFPs = append(sceneFPs, &graphql.FingerprintQueryInput{ - Hash: scene.Checksum.String, - Algorithm: graphql.FingerprintAlgorithmMd5, - }) - } + for _, f := range scene.Files.List() { + checksum := f.Fingerprints.GetString(file.FingerprintTypeMD5) + if checksum != "" { + sceneFPs = append(sceneFPs, &graphql.FingerprintQueryInput{ + Hash: checksum, + Algorithm: graphql.FingerprintAlgorithmMd5, + }) + } - if scene.OSHash.Valid { - sceneFPs = append(sceneFPs, &graphql.FingerprintQueryInput{ - Hash: scene.OSHash.String, - Algorithm: graphql.FingerprintAlgorithmOshash, - }) - } + oshash := f.Fingerprints.GetString(file.FingerprintTypeOshash) + if oshash != "" { + sceneFPs = append(sceneFPs, &graphql.FingerprintQueryInput{ + Hash: oshash, + Algorithm: graphql.FingerprintAlgorithmOshash, + }) + } - if scene.Phash.Valid { - phashStr := utils.PhashToString(scene.Phash.Int64) - sceneFPs = append(sceneFPs, &graphql.FingerprintQueryInput{ - Hash: phashStr, - Algorithm: graphql.FingerprintAlgorithmPhash, - }) + phash := f.Fingerprints.GetInt64(file.FingerprintTypePhash) + if phash != 0 { + phashStr := utils.PhashToString(phash) + sceneFPs = append(sceneFPs, &graphql.FingerprintQueryInput{ + Hash: phashStr, + Algorithm: graphql.FingerprintAlgorithmPhash, + }) + } } fingerprints = append(fingerprints, sceneFPs) @@ -139,8 +186,8 @@ func (c Client) FindStashBoxScenesByFingerprints(ctx context.Context, ids []int) return c.findStashBoxScenesByFingerprints(ctx, fingerprints) } -func (c Client) findStashBoxScenesByFingerprints(ctx context.Context, scenes [][]*graphql.FingerprintQueryInput) ([][]*models.ScrapedScene, error) { - var ret [][]*models.ScrapedScene +func (c Client) findStashBoxScenesByFingerprints(ctx context.Context, scenes [][]*graphql.FingerprintQueryInput) ([][]*scraper.ScrapedScene, error) { + var ret [][]*scraper.ScrapedScene for i := 0; i < len(scenes); i += 40 { end := i + 40 if end > len(scenes) { @@ -153,7 +200,7 @@ func (c Client) findStashBoxScenesByFingerprints(ctx context.Context, scenes [][ } for _, sceneFragments := range scenes.FindScenesBySceneFingerprints { - var sceneResults []*models.ScrapedScene + var sceneResults []*scraper.ScrapedScene for _, scene := range sceneFragments { ss, err := c.sceneFragmentToScrapedScene(ctx, scene) if err != nil { @@ -176,11 +223,11 @@ func (c Client) SubmitStashBoxFingerprints(ctx context.Context, sceneIDs []strin var fingerprints []graphql.FingerprintSubmission - if err := c.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - qb := r.Scene() + if err := txn.WithTxn(ctx, c.txnManager, func(ctx 
context.Context) error { + qb := c.repository.Scene for _, sceneID := range ids { - scene, err := qb.Find(sceneID) + scene, err := qb.Find(ctx, sceneID) if err != nil { return err } @@ -189,11 +236,15 @@ func (c Client) SubmitStashBoxFingerprints(ctx context.Context, sceneIDs []strin continue } - stashIDs, err := qb.GetStashIDs(sceneID) - if err != nil { + if err := scene.LoadStashIDs(ctx, qb); err != nil { return err } + if err := scene.LoadFiles(ctx, qb); err != nil { + return err + } + + stashIDs := scene.StashIDs.List() sceneStashID := "" for _, stashID := range stashIDs { if stashID.Endpoint == endpoint { @@ -202,40 +253,46 @@ func (c Client) SubmitStashBoxFingerprints(ctx context.Context, sceneIDs []strin } if sceneStashID != "" { - if scene.Checksum.Valid && scene.Duration.Valid { - fingerprint := graphql.FingerprintInput{ - Hash: scene.Checksum.String, - Algorithm: graphql.FingerprintAlgorithmMd5, - Duration: int(scene.Duration.Float64), - } - fingerprints = append(fingerprints, graphql.FingerprintSubmission{ - SceneID: sceneStashID, - Fingerprint: &fingerprint, - }) - } + for _, f := range scene.Files.List() { + duration := f.Duration - if scene.OSHash.Valid && scene.Duration.Valid { - fingerprint := graphql.FingerprintInput{ - Hash: scene.OSHash.String, - Algorithm: graphql.FingerprintAlgorithmOshash, - Duration: int(scene.Duration.Float64), - } - fingerprints = append(fingerprints, graphql.FingerprintSubmission{ - SceneID: sceneStashID, - Fingerprint: &fingerprint, - }) - } + if duration != 0 { + if checksum := f.Fingerprints.GetString(file.FingerprintTypeMD5); checksum != "" { + fingerprint := graphql.FingerprintInput{ + Hash: checksum, + Algorithm: graphql.FingerprintAlgorithmMd5, + Duration: int(duration), + } + fingerprints = append(fingerprints, graphql.FingerprintSubmission{ + SceneID: sceneStashID, + Fingerprint: &fingerprint, + }) + } - if scene.Phash.Valid && scene.Duration.Valid { - fingerprint := graphql.FingerprintInput{ - Hash: utils.PhashToString(scene.Phash.Int64), - Algorithm: graphql.FingerprintAlgorithmPhash, - Duration: int(scene.Duration.Float64), + if oshash := f.Fingerprints.GetString(file.FingerprintTypeOshash); oshash != "" { + fingerprint := graphql.FingerprintInput{ + Hash: oshash, + Algorithm: graphql.FingerprintAlgorithmOshash, + Duration: int(duration), + } + fingerprints = append(fingerprints, graphql.FingerprintSubmission{ + SceneID: sceneStashID, + Fingerprint: &fingerprint, + }) + } + + if phash := f.Fingerprints.GetInt64(file.FingerprintTypePhash); phash != 0 { + fingerprint := graphql.FingerprintInput{ + Hash: utils.PhashToString(phash), + Algorithm: graphql.FingerprintAlgorithmPhash, + Duration: int(duration), + } + fingerprints = append(fingerprints, graphql.FingerprintSubmission{ + SceneID: sceneStashID, + Fingerprint: &fingerprint, + }) + } } - fingerprints = append(fingerprints, graphql.FingerprintSubmission{ - SceneID: sceneStashID, - Fingerprint: &fingerprint, - }) } } } @@ -260,10 +317,10 @@ func (c Client) submitStashBoxFingerprints(ctx context.Context, fingerprints []g } // QueryStashBoxPerformer queries stash-box for performers using a query string. 
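Note that findStashBoxScenesByFingerprints above submits fingerprint queries in fixed batches of 40 scenes per request. The batching idiom in isolation, as a generic sketch rather than code from this change:

package main

// inBatches visits a length-n sequence in fixed-size windows, clamping
// the final window to n - the same loop shape as the batch-of-40 query.
func inBatches(n, size int, process func(start, end int)) {
	for i := 0; i < n; i += size {
		end := i + size
		if end > n {
			end = n
		}
		process(i, end)
	}
}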
-func (c Client) QueryStashBoxPerformer(ctx context.Context, queryStr string) ([]*models.StashBoxPerformerQueryResult, error) { +func (c Client) QueryStashBoxPerformer(ctx context.Context, queryStr string) ([]*StashBoxPerformerQueryResult, error) { performers, err := c.queryStashBoxPerformer(ctx, queryStr) - res := []*models.StashBoxPerformerQueryResult{ + res := []*StashBoxPerformerQueryResult{ { Query: queryStr, Results: performers, @@ -298,7 +355,7 @@ func (c Client) queryStashBoxPerformer(ctx context.Context, queryStr string) ([] } // FindStashBoxPerformersByNames queries stash-box for performers by name -func (c Client) FindStashBoxPerformersByNames(ctx context.Context, performerIDs []string) ([]*models.StashBoxPerformerQueryResult, error) { +func (c Client) FindStashBoxPerformersByNames(ctx context.Context, performerIDs []string) ([]*StashBoxPerformerQueryResult, error) { ids, err := stringslice.StringSliceToIntSlice(performerIDs) if err != nil { return nil, err @@ -306,11 +363,11 @@ func (c Client) FindStashBoxPerformersByNames(ctx context.Context, performerIDs var performers []*models.Performer - if err := c.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - qb := r.Performer() + if err := txn.WithTxn(ctx, c.txnManager, func(ctx context.Context) error { + qb := c.repository.Performer for _, performerID := range ids { - performer, err := qb.Find(performerID) + performer, err := qb.Find(ctx, performerID) if err != nil { return err } @@ -340,11 +397,11 @@ func (c Client) FindStashBoxPerformersByPerformerNames(ctx context.Context, perf var performers []*models.Performer - if err := c.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - qb := r.Performer() + if err := txn.WithTxn(ctx, c.txnManager, func(ctx context.Context) error { + qb := c.repository.Performer for _, performerID := range ids { - performer, err := qb.Find(performerID) + performer, err := qb.Find(ctx, performerID) if err != nil { return err } @@ -376,8 +433,8 @@ func (c Client) FindStashBoxPerformersByPerformerNames(ctx context.Context, perf return ret, nil } -func (c Client) findStashBoxPerformersByNames(ctx context.Context, performers []*models.Performer) ([]*models.StashBoxPerformerQueryResult, error) { - var ret []*models.StashBoxPerformerQueryResult +func (c Client) findStashBoxPerformersByNames(ctx context.Context, performers []*models.Performer) ([]*StashBoxPerformerQueryResult, error) { + var ret []*StashBoxPerformerQueryResult for _, performer := range performers { if performer.Name.Valid { performerResults, err := c.queryStashBoxPerformer(ctx, performer.Name.String) @@ -385,7 +442,7 @@ func (c Client) findStashBoxPerformersByNames(ctx context.Context, performers [] return nil, err } - result := models.StashBoxPerformerQueryResult{ + result := StashBoxPerformerQueryResult{ Query: strconv.Itoa(performer.ID), Results: performerResults, } @@ -601,9 +658,9 @@ func getFingerprints(scene *graphql.SceneFragment) []*models.StashBoxFingerprint return fingerprints } -func (c Client) sceneFragmentToScrapedScene(ctx context.Context, s *graphql.SceneFragment) (*models.ScrapedScene, error) { +func (c Client) sceneFragmentToScrapedScene(ctx context.Context, s *graphql.SceneFragment) (*scraper.ScrapedScene, error) { stashID := s.ID - ss := &models.ScrapedScene{ + ss := &scraper.ScrapedScene{ Title: s.Title, Date: s.Date, Details: s.Details, @@ -621,9 +678,9 @@ func (c Client) sceneFragmentToScrapedScene(ctx context.Context, s *graphql.Scen ss.Image = getFirstImage(ctx, c.getHTTPClient(), 
s.Images) } - if err := c.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - pqb := r.Performer() - tqb := r.Tag() + if err := txn.WithTxn(ctx, c.txnManager, func(ctx context.Context) error { + pqb := c.repository.Performer + tqb := c.repository.Tag if s.Studio != nil { studioID := s.Studio.ID @@ -633,7 +690,7 @@ func (c Client) sceneFragmentToScrapedScene(ctx context.Context, s *graphql.Scen RemoteSiteID: &studioID, } - err := match.ScrapedStudio(r.Studio(), ss.Studio, &c.box.Endpoint) + err := match.ScrapedStudio(ctx, c.repository.Studio, ss.Studio, &c.box.Endpoint) if err != nil { return err } @@ -642,7 +699,7 @@ func (c Client) sceneFragmentToScrapedScene(ctx context.Context, s *graphql.Scen for _, p := range s.Performers { sp := performerFragmentToScrapedScenePerformer(p.Performer) - err := match.ScrapedPerformer(pqb, sp, &c.box.Endpoint) + err := match.ScrapedPerformer(ctx, pqb, sp, &c.box.Endpoint) if err != nil { return err } @@ -655,7 +712,7 @@ func (c Client) sceneFragmentToScrapedScene(ctx context.Context, s *graphql.Scen Name: t.Name, } - err := match.ScrapedTag(tqb, st) + err := match.ScrapedTag(ctx, tqb, st) if err != nil { return err } @@ -701,121 +758,131 @@ func (c Client) GetUser(ctx context.Context) (*graphql.Me, error) { return c.client.Me(ctx) } -func (c Client) SubmitSceneDraft(ctx context.Context, sceneID int, endpoint string, imagePath string) (*string, error) { +func (c Client) SubmitSceneDraft(ctx context.Context, scene *models.Scene, endpoint string, imagePath string) (*string, error) { draft := graphql.SceneDraftInput{} - var image *os.File - if err := c.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - qb := r.Scene() - pqb := r.Performer() - sqb := r.Studio() + var image io.Reader + r := c.repository + pqb := r.Performer + sqb := r.Studio - scene, err := qb.Find(sceneID) + if scene.Title != "" { + draft.Title = &scene.Title + } + if scene.Details != "" { + draft.Details = &scene.Details + } + if scene.URL != "" && len(strings.TrimSpace(scene.URL)) > 0 { + url := strings.TrimSpace(scene.URL) + draft.URL = &url + } + if scene.Date != nil { + v := scene.Date.String() + draft.Date = &v + } + + if scene.StudioID != nil { + studio, err := sqb.Find(ctx, int(*scene.StudioID)) if err != nil { - return err + return nil, err + } + studioDraft := graphql.DraftEntityInput{ + Name: studio.Name.String, } - if scene.Title.Valid { - draft.Title = &scene.Title.String + stashIDs, err := sqb.GetStashIDs(ctx, studio.ID) + if err != nil { + return nil, err } - if scene.Details.Valid { - draft.Details = &scene.Details.String - } - if len(strings.TrimSpace(scene.URL.String)) > 0 { - url := strings.TrimSpace(scene.URL.String) - draft.URL = &url - } - if scene.Date.Valid { - draft.Date = &scene.Date.String + for _, stashID := range stashIDs { + c := stashID + if stashID.Endpoint == endpoint { + studioDraft.ID = &c.StashID + break + } } + draft.Studio = &studioDraft + } - if scene.StudioID.Valid { - studio, err := sqb.Find(int(scene.StudioID.Int64)) - if err != nil { - return err - } - studioDraft := graphql.DraftEntityInput{ - Name: studio.Name.String, - } + fingerprints := []*graphql.FingerprintInput{} - stashIDs, err := sqb.GetStashIDs(studio.ID) - if err != nil { - return err - } - for _, stashID := range stashIDs { - if stashID.Endpoint == endpoint { - studioDraft.ID = &stashID.StashID - break + // submit all file fingerprints + if err := scene.LoadFiles(ctx, r.Scene); err != nil { + return nil, err + } + + for _, f := range scene.Files.List() 
{ + duration := f.Duration + + if duration != 0 { + if oshash := f.Fingerprints.GetString(file.FingerprintTypeOshash); oshash != "" { + fingerprint := graphql.FingerprintInput{ + Hash: oshash, + Algorithm: graphql.FingerprintAlgorithmOshash, + Duration: int(duration), } - } - draft.Studio = &studioDraft - } - - fingerprints := []*graphql.FingerprintInput{} - if scene.OSHash.Valid && scene.Duration.Valid { - fingerprint := graphql.FingerprintInput{ - Hash: scene.OSHash.String, - Algorithm: graphql.FingerprintAlgorithmOshash, - Duration: int(scene.Duration.Float64), - } - fingerprints = append(fingerprints, &fingerprint) - } - - if scene.Checksum.Valid && scene.Duration.Valid { - fingerprint := graphql.FingerprintInput{ - Hash: scene.Checksum.String, - Algorithm: graphql.FingerprintAlgorithmMd5, - Duration: int(scene.Duration.Float64), - } - fingerprints = append(fingerprints, &fingerprint) - } - - if scene.Phash.Valid && scene.Duration.Valid { - fingerprint := graphql.FingerprintInput{ - Hash: utils.PhashToString(scene.Phash.Int64), - Algorithm: graphql.FingerprintAlgorithmPhash, - Duration: int(scene.Duration.Float64), - } - fingerprints = append(fingerprints, &fingerprint) - } - draft.Fingerprints = fingerprints - - scenePerformers, err := pqb.FindBySceneID(sceneID) - if err != nil { - return err - } - - performers := []*graphql.DraftEntityInput{} - for _, p := range scenePerformers { - performerDraft := graphql.DraftEntityInput{ - Name: p.Name.String, + fingerprints = append(fingerprints, &fingerprint) } - stashIDs, err := pqb.GetStashIDs(p.ID) - if err != nil { - return err - } - - for _, stashID := range stashIDs { - if stashID.Endpoint == endpoint { - performerDraft.ID = &stashID.StashID - break + if checksum := f.Fingerprints.GetString(file.FingerprintTypeMD5); checksum != "" { + fingerprint := graphql.FingerprintInput{ + Hash: checksum, + Algorithm: graphql.FingerprintAlgorithmMd5, + Duration: int(duration), } + fingerprints = append(fingerprints, &fingerprint) } - performers = append(performers, &performerDraft) + if phash := f.Fingerprints.GetInt64(file.FingerprintTypePhash); phash != 0 { + fingerprint := graphql.FingerprintInput{ + Hash: utils.PhashToString(phash), + Algorithm: graphql.FingerprintAlgorithmPhash, + Duration: int(duration), + } + fingerprints = append(fingerprints, &fingerprint) + } } - draft.Performers = performers + } + draft.Fingerprints = fingerprints - var tags []*graphql.DraftEntityInput - sceneTags, err := r.Tag().FindBySceneID(scene.ID) + scenePerformers, err := pqb.FindBySceneID(ctx, scene.ID) + if err != nil { + return nil, err + } + + performers := []*graphql.DraftEntityInput{} + for _, p := range scenePerformers { + performerDraft := graphql.DraftEntityInput{ + Name: p.Name.String, + } + + stashIDs, err := pqb.GetStashIDs(ctx, p.ID) if err != nil { - return err + return nil, err } - for _, tag := range sceneTags { - tags = append(tags, &graphql.DraftEntityInput{Name: tag.Name}) - } - draft.Tags = tags + for _, stashID := range stashIDs { + c := stashID + if stashID.Endpoint == endpoint { + performerDraft.ID = &c.StashID + break + } + } + + performers = append(performers, &performerDraft) + } + draft.Performers = performers + + var tags []*graphql.DraftEntityInput + sceneTags, err := r.Tag.FindBySceneID(ctx, scene.ID) + if err != nil { + return nil, err + } + for _, tag := range sceneTags { + tags = append(tags, &graphql.DraftEntityInput{Name: tag.Name}) + } + draft.Tags = tags + + if imagePath != "" { exists, _ := fsutil.FileExists(imagePath) if exists 
{ file, err := os.Open(imagePath) @@ -823,28 +890,26 @@ func (c Client) SubmitSceneDraft(ctx context.Context, sceneID int, endpoint stri image = file } } + } - stashIDs, err := qb.GetStashIDs(sceneID) - if err != nil { - return err - } - var stashID *string - for _, v := range stashIDs { - if v.Endpoint == endpoint { - stashID = &v.StashID - break - } - } - draft.ID = stashID - - return nil - }); err != nil { + if err := scene.LoadStashIDs(ctx, r.Scene); err != nil { return nil, err } + stashIDs := scene.StashIDs.List() + var stashID *string + for _, v := range stashIDs { + if v.Endpoint == endpoint { + vv := v.StashID + stashID = &vv + break + } + } + draft.ID = stashID + var id *string var ret graphql.SubmitSceneDraft - err := c.submitDraft(ctx, graphql.SubmitSceneDraftDocument, draft, image, &ret) + err = c.submitDraft(ctx, graphql.SubmitSceneDraftDocument, draft, image, &ret) id = ret.SubmitSceneDraft.ID return id, err @@ -861,88 +926,83 @@ func (c Client) SubmitSceneDraft(ctx context.Context, sceneID int, endpoint stri func (c Client) SubmitPerformerDraft(ctx context.Context, performer *models.Performer, endpoint string) (*string, error) { draft := graphql.PerformerDraftInput{} var image io.Reader - if err := c.txnManager.WithReadTxn(ctx, func(r models.ReaderRepository) error { - pqb := r.Performer() - img, _ := pqb.GetImage(performer.ID) - if img != nil { - image = bytes.NewReader(img) - } + pqb := c.repository.Performer + img, _ := pqb.GetImage(ctx, performer.ID) + if img != nil { + image = bytes.NewReader(img) + } - if performer.Name.Valid { - draft.Name = performer.Name.String - } - if performer.Birthdate.Valid { - draft.Birthdate = &performer.Birthdate.String - } - if performer.Country.Valid { - draft.Country = &performer.Country.String - } - if performer.Ethnicity.Valid { - draft.Ethnicity = &performer.Ethnicity.String - } - if performer.EyeColor.Valid { - draft.EyeColor = &performer.EyeColor.String - } - if performer.FakeTits.Valid { - draft.BreastType = &performer.FakeTits.String - } - if performer.Gender.Valid { - draft.Gender = &performer.Gender.String - } - if performer.HairColor.Valid { - draft.HairColor = &performer.HairColor.String - } - if performer.Height.Valid { - draft.Height = &performer.Height.String - } - if performer.Measurements.Valid { - draft.Measurements = &performer.Measurements.String - } - if performer.Piercings.Valid { - draft.Piercings = &performer.Piercings.String - } - if performer.Tattoos.Valid { - draft.Tattoos = &performer.Tattoos.String - } - if performer.Aliases.Valid { - draft.Aliases = &performer.Aliases.String - } + if performer.Name.Valid { + draft.Name = performer.Name.String + } + if performer.Birthdate.Valid { + draft.Birthdate = &performer.Birthdate.String + } + if performer.Country.Valid { + draft.Country = &performer.Country.String + } + if performer.Ethnicity.Valid { + draft.Ethnicity = &performer.Ethnicity.String + } + if performer.EyeColor.Valid { + draft.EyeColor = &performer.EyeColor.String + } + if performer.FakeTits.Valid { + draft.BreastType = &performer.FakeTits.String + } + if performer.Gender.Valid { + draft.Gender = &performer.Gender.String + } + if performer.HairColor.Valid { + draft.HairColor = &performer.HairColor.String + } + if performer.Height.Valid { + draft.Height = &performer.Height.String + } + if performer.Measurements.Valid { + draft.Measurements = &performer.Measurements.String + } + if performer.Piercings.Valid { + draft.Piercings = &performer.Piercings.String + } + if performer.Tattoos.Valid { + draft.Tattoos = 
&performer.Tattoos.String + } + if performer.Aliases.Valid { + draft.Aliases = &performer.Aliases.String + } - var urls []string - if len(strings.TrimSpace(performer.Twitter.String)) > 0 { - urls = append(urls, "https://twitter.com/"+strings.TrimSpace(performer.Twitter.String)) - } - if len(strings.TrimSpace(performer.Instagram.String)) > 0 { - urls = append(urls, "https://instagram.com/"+strings.TrimSpace(performer.Instagram.String)) - } - if len(strings.TrimSpace(performer.URL.String)) > 0 { - urls = append(urls, strings.TrimSpace(performer.URL.String)) - } - if len(urls) > 0 { - draft.Urls = urls - } + var urls []string + if len(strings.TrimSpace(performer.Twitter.String)) > 0 { + urls = append(urls, "https://twitter.com/"+strings.TrimSpace(performer.Twitter.String)) + } + if len(strings.TrimSpace(performer.Instagram.String)) > 0 { + urls = append(urls, "https://instagram.com/"+strings.TrimSpace(performer.Instagram.String)) + } + if len(strings.TrimSpace(performer.URL.String)) > 0 { + urls = append(urls, strings.TrimSpace(performer.URL.String)) + } + if len(urls) > 0 { + draft.Urls = urls + } - stashIDs, err := pqb.GetStashIDs(performer.ID) - if err != nil { - return err - } - var stashID *string - for _, v := range stashIDs { - if v.Endpoint == endpoint { - stashID = &v.StashID - break - } - } - draft.ID = stashID - - return nil - }); err != nil { + stashIDs, err := pqb.GetStashIDs(ctx, performer.ID) + if err != nil { return nil, err } + var stashID *string + for _, v := range stashIDs { + c := v + if v.Endpoint == endpoint { + stashID = &c.StashID + break + } + } + draft.ID = stashID var id *string var ret graphql.SubmitPerformerDraft - err := c.submitDraft(ctx, graphql.SubmitPerformerDraftDocument, draft, image, &ret) + err = c.submitDraft(ctx, graphql.SubmitPerformerDraftDocument, draft, image, &ret) id = ret.SubmitPerformerDraft.ID return id, err diff --git a/pkg/scraper/xpath.go b/pkg/scraper/xpath.go index 79300d30b..29a4b0a19 100644 --- a/pkg/scraper/xpath.go +++ b/pkg/scraper/xpath.go @@ -23,16 +23,14 @@ type xpathScraper struct { config config globalConfig GlobalConfig client *http.Client - txnManager models.TransactionManager } -func newXpathScraper(scraper scraperTypeConfig, client *http.Client, txnManager models.TransactionManager, config config, globalConfig GlobalConfig) *xpathScraper { +func newXpathScraper(scraper scraperTypeConfig, client *http.Client, config config, globalConfig GlobalConfig) *xpathScraper { return &xpathScraper{ scraper: scraper, config: config, globalConfig: globalConfig, client: client, - txnManager: txnManager, } } @@ -56,7 +54,7 @@ func (s *xpathScraper) scrapeURL(ctx context.Context, url string) (*html.Node, * return doc, scraper, nil } -func (s *xpathScraper) scrapeByURL(ctx context.Context, url string, ty models.ScrapeContentType) (models.ScrapedContent, error) { +func (s *xpathScraper) scrapeByURL(ctx context.Context, url string, ty ScrapeContentType) (ScrapedContent, error) { u := replaceURL(url, s.scraper) // allow a URL Replace for performer by URL queries doc, scraper, err := s.scrapeURL(ctx, u) if err != nil { @@ -65,20 +63,20 @@ func (s *xpathScraper) scrapeByURL(ctx context.Context, url string, ty models.Sc q := s.getXPathQuery(doc) switch ty { - case models.ScrapeContentTypePerformer: + case ScrapeContentTypePerformer: return scraper.scrapePerformer(ctx, q) - case models.ScrapeContentTypeScene: + case ScrapeContentTypeScene: return scraper.scrapeScene(ctx, q) - case models.ScrapeContentTypeGallery: + case ScrapeContentTypeGallery: 
return scraper.scrapeGallery(ctx, q) - case models.ScrapeContentTypeMovie: + case ScrapeContentTypeMovie: return scraper.scrapeMovie(ctx, q) } return nil, ErrNotSupported } -func (s *xpathScraper) scrapeByName(ctx context.Context, name string, ty models.ScrapeContentType) ([]models.ScrapedContent, error) { +func (s *xpathScraper) scrapeByName(ctx context.Context, name string, ty ScrapeContentType) ([]ScrapedContent, error) { scraper := s.getXpathScraper() if scraper == nil { @@ -102,9 +100,9 @@ func (s *xpathScraper) scrapeByName(ctx context.Context, name string, ty models. q := s.getXPathQuery(doc) q.setType(SearchQuery) - var content []models.ScrapedContent + var content []ScrapedContent switch ty { - case models.ScrapeContentTypePerformer: + case ScrapeContentTypePerformer: performers, err := scraper.scrapePerformers(ctx, q) if err != nil { return nil, err @@ -114,7 +112,7 @@ func (s *xpathScraper) scrapeByName(ctx context.Context, name string, ty models. } return content, nil - case models.ScrapeContentTypeScene: + case ScrapeContentTypeScene: scenes, err := scraper.scrapeScenes(ctx, q) if err != nil { return nil, err @@ -129,7 +127,7 @@ func (s *xpathScraper) scrapeByName(ctx context.Context, name string, ty models. return nil, ErrNotSupported } -func (s *xpathScraper) scrapeSceneByScene(ctx context.Context, scene *models.Scene) (*models.ScrapedScene, error) { +func (s *xpathScraper) scrapeSceneByScene(ctx context.Context, scene *models.Scene) (*ScrapedScene, error) { // construct the URL queryURL := queryURLParametersFromScene(scene) if s.scraper.QueryURLReplacements != nil { @@ -153,7 +151,7 @@ func (s *xpathScraper) scrapeSceneByScene(ctx context.Context, scene *models.Sce return scraper.scrapeScene(ctx, q) } -func (s *xpathScraper) scrapeByFragment(ctx context.Context, input Input) (models.ScrapedContent, error) { +func (s *xpathScraper) scrapeByFragment(ctx context.Context, input Input) (ScrapedContent, error) { switch { case input.Gallery != nil: return nil, fmt.Errorf("%w: cannot use an xpath scraper as a gallery fragment scraper", ErrNotSupported) @@ -188,7 +186,7 @@ func (s *xpathScraper) scrapeByFragment(ctx context.Context, input Input) (model return scraper.scrapeScene(ctx, q) } -func (s *xpathScraper) scrapeGalleryByGallery(ctx context.Context, gallery *models.Gallery) (*models.ScrapedGallery, error) { +func (s *xpathScraper) scrapeGalleryByGallery(ctx context.Context, gallery *models.Gallery) (*ScrapedGallery, error) { // construct the URL queryURL := queryURLParametersFromGallery(gallery) if s.scraper.QueryURLReplacements != nil { diff --git a/pkg/scraper/xpath_test.go b/pkg/scraper/xpath_test.go index 782b753f0..7120f8574 100644 --- a/pkg/scraper/xpath_test.go +++ b/pkg/scraper/xpath_test.go @@ -885,12 +885,12 @@ xPathScrapers: client := &http.Client{} ctx := context.Background() - s := newGroupScraper(*c, nil, globalConfig) + s := newGroupScraper(*c, globalConfig) us, ok := s.(urlScraper) if !ok { t.Error("couldn't convert scraper into url scraper") } - content, err := us.viaURL(ctx, client, ts.URL, models.ScrapeContentTypePerformer) + content, err := us.viaURL(ctx, client, ts.URL, ScrapeContentTypePerformer) if err != nil { t.Errorf("Error scraping performer: %s", err.Error()) diff --git a/pkg/sliceutil/intslice/int_collections.go b/pkg/sliceutil/intslice/int_collections.go index daf5b3d11..6213c41e3 100644 --- a/pkg/sliceutil/intslice/int_collections.go +++ b/pkg/sliceutil/intslice/int_collections.go @@ -53,6 +53,18 @@ func IntExclude(vs []int, toExclude []int) 
[]int { return ret } +// IntIntercect returns a slice of ints containing values that exist in both provided slices. +func IntIntercect(v1, v2 []int) []int { + var ret []int + for _, v := range v1 { + if IntInclude(v2, v) { + ret = append(ret, v) + } + } + + return ret +} + // IntSliceToStringSlice converts a slice of ints to a slice of strings. func IntSliceToStringSlice(ss []int) []string { ret := make([]string, len(ss)) diff --git a/pkg/sqlite/common.go b/pkg/sqlite/common.go new file mode 100644 index 000000000..8874fb1b4 --- /dev/null +++ b/pkg/sqlite/common.go @@ -0,0 +1,75 @@ +package sqlite + +import ( + "context" + "fmt" + + "github.com/doug-martin/goqu/v9" + "github.com/jmoiron/sqlx" +) + +type oCounterManager struct { + tableMgr *table +} + +func (qb *oCounterManager) getOCounter(ctx context.Context, id int) (int, error) { + q := dialect.From(qb.tableMgr.table).Select("o_counter").Where(goqu.Ex{"id": id}) + + const single = true + var ret int + if err := queryFunc(ctx, q, single, func(rows *sqlx.Rows) error { + if err := rows.Scan(&ret); err != nil { + return err + } + return nil + }); err != nil { + return 0, err + } + + return ret, nil +} + +func (qb *oCounterManager) IncrementOCounter(ctx context.Context, id int) (int, error) { + if err := qb.tableMgr.checkIDExists(ctx, id); err != nil { + return 0, err + } + + if err := qb.tableMgr.updateByID(ctx, id, goqu.Record{ + "o_counter": goqu.L("o_counter + 1"), + }); err != nil { + return 0, err + } + + return qb.getOCounter(ctx, id) +} + +func (qb *oCounterManager) DecrementOCounter(ctx context.Context, id int) (int, error) { + if err := qb.tableMgr.checkIDExists(ctx, id); err != nil { + return 0, err + } + + table := qb.tableMgr.table + q := dialect.Update(table).Set(goqu.Record{ + "o_counter": goqu.L("o_counter - 1"), + }).Where(qb.tableMgr.byID(id), goqu.L("o_counter > 0")) + + if _, err := exec(ctx, q); err != nil { + return 0, fmt.Errorf("updating %s: %w", table.GetTable(), err) + } + + return qb.getOCounter(ctx, id) +} + +func (qb *oCounterManager) ResetOCounter(ctx context.Context, id int) (int, error) { + if err := qb.tableMgr.checkIDExists(ctx, id); err != nil { + return 0, err + } + + if err := qb.tableMgr.updateByID(ctx, id, goqu.Record{ + "o_counter": 0, + }); err != nil { + return 0, err + } + + return qb.getOCounter(ctx, id) +} diff --git a/pkg/sqlite/custom_migrations.go b/pkg/sqlite/custom_migrations.go new file mode 100644 index 000000000..bbd7aa67d --- /dev/null +++ b/pkg/sqlite/custom_migrations.go @@ -0,0 +1,24 @@ +package sqlite + +import ( + "context" + + "github.com/jmoiron/sqlx" +) + +type customMigrationFunc func(ctx context.Context, db *sqlx.DB) error + +func RegisterPostMigration(schemaVersion uint, fn customMigrationFunc) { + v := postMigrations[schemaVersion] + v = append(v, fn) + postMigrations[schemaVersion] = v +} + +func RegisterPreMigration(schemaVersion uint, fn customMigrationFunc) { + v := preMigrations[schemaVersion] + v = append(v, fn) + preMigrations[schemaVersion] = v +} + +var postMigrations = make(map[uint][]customMigrationFunc) +var preMigrations = make(map[uint][]customMigrationFunc) diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go new file mode 100644 index 000000000..d73abb977 --- /dev/null +++ b/pkg/sqlite/database.go @@ -0,0 +1,420 @@ +package sqlite + +import ( + "context" + "database/sql" + "embed" + "errors" + "fmt" + "os" + "sync" + "time" + + "github.com/fvbommel/sortorder" + "github.com/golang-migrate/migrate/v4" + sqlite3mig 
"github.com/golang-migrate/migrate/v4/database/sqlite3" + "github.com/golang-migrate/migrate/v4/source/iofs" + "github.com/jmoiron/sqlx" + sqlite3 "github.com/mattn/go-sqlite3" + + "github.com/stashapp/stash/pkg/fsutil" + "github.com/stashapp/stash/pkg/logger" +) + +var appSchemaVersion uint = 32 + +//go:embed migrations/*.sql +var migrationsBox embed.FS + +var ( + // ErrDatabaseNotInitialized indicates that the database is not + // initialized, usually due to an incomplete configuration. + ErrDatabaseNotInitialized = errors.New("database not initialized") +) + +// ErrMigrationNeeded indicates that a database migration is needed +// before the database can be initialized +type MigrationNeededError struct { + CurrentSchemaVersion uint + RequiredSchemaVersion uint +} + +func (e *MigrationNeededError) Error() string { + return fmt.Sprintf("database schema version %d does not match required schema version %d", e.CurrentSchemaVersion, e.RequiredSchemaVersion) +} + +type MismatchedSchemaVersionError struct { + CurrentSchemaVersion uint + RequiredSchemaVersion uint +} + +func (e *MismatchedSchemaVersionError) Error() string { + return fmt.Sprintf("schema version %d is incompatible with required schema version %d", e.CurrentSchemaVersion, e.RequiredSchemaVersion) +} + +const sqlite3Driver = "sqlite3ex" + +func init() { + // register custom driver with regexp function + registerCustomDriver() +} + +type Database struct { + File *FileStore + Folder *FolderStore + Image *ImageStore + Gallery *GalleryStore + Scene *SceneStore + + db *sqlx.DB + dbPath string + + schemaVersion uint + + writeMu sync.Mutex +} + +func NewDatabase() *Database { + fileStore := NewFileStore() + folderStore := NewFolderStore() + + ret := &Database{ + File: fileStore, + Folder: folderStore, + Scene: NewSceneStore(fileStore), + Image: NewImageStore(fileStore), + Gallery: NewGalleryStore(fileStore, folderStore), + } + + return ret +} + +// Ready returns an error if the database is not ready to begin transactions. +func (db *Database) Ready() error { + if db.db == nil { + return ErrDatabaseNotInitialized + } + + return nil +} + +// Open initializes the database. If the database is new, then it +// performs a full migration to the latest schema version. Otherwise, any +// necessary migrations must be run separately using RunMigrations. +// Returns true if the database is new. 
+func (db *Database) Open(dbPath string) error { + db.writeMu.Lock() + defer db.writeMu.Unlock() + + db.dbPath = dbPath + + databaseSchemaVersion, err := db.getDatabaseSchemaVersion() + if err != nil { + return fmt.Errorf("getting database schema version: %w", err) + } + + db.schemaVersion = databaseSchemaVersion + + if databaseSchemaVersion == 0 { + // new database, just run the migrations + if err := db.RunMigrations(); err != nil { + return fmt.Errorf("error running initial schema migrations: %v", err) + } + } else { + if databaseSchemaVersion > appSchemaVersion { + return &MismatchedSchemaVersionError{ + CurrentSchemaVersion: databaseSchemaVersion, + RequiredSchemaVersion: appSchemaVersion, + } + } + + // if migration is needed, then don't open the connection + if db.needsMigration() { + return &MigrationNeededError{ + CurrentSchemaVersion: databaseSchemaVersion, + RequiredSchemaVersion: appSchemaVersion, + } + } + } + + // RunMigrations may have opened a connection already + if db.db == nil { + const disableForeignKeys = false + db.db, err = db.open(disableForeignKeys) + if err != nil { + return err + } + } + + return nil +} + +func (db *Database) Close() error { + db.writeMu.Lock() + defer db.writeMu.Unlock() + + if db.db != nil { + if err := db.db.Close(); err != nil { + return err + } + + db.db = nil + } + + return nil +} + +func (db *Database) open(disableForeignKeys bool) (*sqlx.DB, error) { + // https://github.com/mattn/go-sqlite3 + url := "file:" + db.dbPath + "?_journal=WAL&_sync=NORMAL" + if !disableForeignKeys { + url += "&_fk=true" + } + + conn, err := sqlx.Open(sqlite3Driver, url) + if err != nil { + return nil, fmt.Errorf("db.Open(): %w", err) + } + + conn.SetMaxOpenConns(25) + conn.SetMaxIdleConns(4) + conn.SetConnMaxLifetime(30 * time.Second) + + return conn, nil +} + +func (db *Database) Reset() error { + databasePath := db.dbPath + err := db.Close() + + if err != nil { + return errors.New("Error closing database: " + err.Error()) + } + + err = os.Remove(databasePath) + if err != nil { + return errors.New("Error removing database: " + err.Error()) + } + + // remove the -shm, -wal files ( if they exist ) + walFiles := []string{databasePath + "-shm", databasePath + "-wal"} + for _, wf := range walFiles { + if exists, _ := fsutil.FileExists(wf); exists { + err = os.Remove(wf) + if err != nil { + return errors.New("Error removing database: " + err.Error()) + } + } + } + + if err := db.Open(databasePath); err != nil { + return fmt.Errorf("[reset DB] unable to initialize: %w", err) + } + + return nil +} + +// Backup backs up the database to backupPath using VACUUM INTO. If the +// database connection is not open, a temporary connection is opened for +// the duration of the backup.
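A note on the DSN in open() above: the mattn/go-sqlite3 driver maps DSN query parameters to per-connection PRAGMAs, so _journal=WAL, _sync=NORMAL and _fk=true correspond to PRAGMA journal_mode=WAL, PRAGMA synchronous=NORMAL and PRAGMA foreign_keys=ON. A small sketch that checks the effective mode on a live connection (assumes an already-opened *sqlx.DB):

package main

import (
	"fmt"

	"github.com/jmoiron/sqlx"
)

// journalMode reports the journal mode actually in effect; with the DSN
// used in open() this should print "wal".
func journalMode(conn *sqlx.DB) (string, error) {
	var mode string
	if err := conn.Get(&mode, "PRAGMA journal_mode"); err != nil {
		return "", err
	}
	fmt.Println("journal_mode:", mode)
	return mode, nil
}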
+func (db *Database) Backup(backupPath string) error { + thisDB := db.db + if thisDB == nil { + var err error + thisDB, err = sqlx.Connect(sqlite3Driver, "file:"+db.dbPath+"?_fk=true") + if err != nil { + return fmt.Errorf("open database %s failed: %v", db.dbPath, err) + } + defer thisDB.Close() + } + + logger.Infof("Backing up database into: %s", backupPath) + _, err := thisDB.Exec(`VACUUM INTO "` + backupPath + `"`) + if err != nil { + return fmt.Errorf("vacuum failed: %v", err) + } + + return nil +} + +func (db *Database) RestoreFromBackup(backupPath string) error { + logger.Infof("Restoring backup database %s into %s", backupPath, db.dbPath) + return os.Rename(backupPath, db.dbPath) +} + +// needsMigration returns true if the database schema is not at the +// version required by the application. +func (db *Database) needsMigration() bool { + return db.schemaVersion != appSchemaVersion +} + +func (db *Database) AppSchemaVersion() uint { + return appSchemaVersion +} + +func (db *Database) DatabasePath() string { + return db.dbPath +} + +func (db *Database) DatabaseBackupPath() string { + return fmt.Sprintf("%s.%d.%s", db.dbPath, db.schemaVersion, time.Now().Format("20060102_150405")) +} + +func (db *Database) Version() uint { + return db.schemaVersion +} + +func (db *Database) getMigrate() (*migrate.Migrate, error) { + migrations, err := iofs.New(migrationsBox, "migrations") + if err != nil { + return nil, err + } + + const disableForeignKeys = true + conn, err := db.open(disableForeignKeys) + if err != nil { + return nil, err + } + + driver, err := sqlite3mig.WithInstance(conn.DB, &sqlite3mig.Config{}) + if err != nil { + return nil, err + } + + // use sqlite3Driver so that migration has access to durationToTinyInt + return migrate.NewWithInstance( + "iofs", + migrations, + db.dbPath, + driver, + ) +} + +func (db *Database) getDatabaseSchemaVersion() (uint, error) { + m, err := db.getMigrate() + if err != nil { + return 0, err + } + defer m.Close() + + ret, _, _ := m.Version() + return ret, nil +} + +// RunMigrations migrates the database to the application schema version, +// running any registered custom pre/post migrations around each step. +func (db *Database) RunMigrations() error { + ctx := context.Background() + + m, err := db.getMigrate() + if err != nil { + return err + } + defer m.Close() + + databaseSchemaVersion, _, _ := m.Version() + stepNumber := appSchemaVersion - databaseSchemaVersion + if stepNumber != 0 { + logger.Infof("Migrating database from version %d to %d", databaseSchemaVersion, appSchemaVersion) + + // run each migration individually, and run custom migrations as needed + var i uint = 1 + for ; i <= stepNumber; i++ { + newVersion := databaseSchemaVersion + i + + // run pre migrations as needed + if err := db.runCustomMigrations(ctx, preMigrations[newVersion]); err != nil { + return fmt.Errorf("running pre migrations for schema version %d: %w", newVersion, err) + } + + err = m.Steps(1) + if err != nil { + // migration failed + return err + } + + // run post migrations as needed + if err := db.runCustomMigrations(ctx, postMigrations[newVersion]); err != nil { + return fmt.Errorf("running post migrations for schema version %d: %w", newVersion, err) + } + } + } + + // update the schema version + db.schemaVersion, _, _ = m.Version() + + // re-initialise the database + const disableForeignKeys = false + db.db, err = db.open(disableForeignKeys) + if err != nil { + return fmt.Errorf("re-initializing the database: %w", err) + } + + // optimize database after migration + logger.Info("Optimizing database") + _, err = db.db.Exec("ANALYZE") + if err != nil { + logger.Warnf("error while performing post-migration optimization: %v", err) + } + _, err = db.db.Exec("VACUUM") + if
err != nil { + logger.Warnf("error while performing post-migration vacuum: %v", err) + } + + return nil +} + +func (db *Database) runCustomMigrations(ctx context.Context, fns []customMigrationFunc) error { + for _, fn := range fns { + if err := db.runCustomMigration(ctx, fn); err != nil { + return err + } + } + + return nil +} + +func (db *Database) runCustomMigration(ctx context.Context, fn customMigrationFunc) error { + const disableForeignKeys = false + d, err := db.open(disableForeignKeys) + if err != nil { + return err + } + + defer d.Close() + if err := fn(ctx, d); err != nil { + return err + } + + return nil +} + +func registerCustomDriver() { + sql.Register(sqlite3Driver, + &sqlite3.SQLiteDriver{ + ConnectHook: func(conn *sqlite3.SQLiteConn) error { + funcs := map[string]interface{}{ + "regexp": regexFn, + "durationToTinyInt": durationToTinyIntFn, + } + + for name, fn := range funcs { + if err := conn.RegisterFunc(name, fn, true); err != nil { + return fmt.Errorf("error registering function %s: %s", name, err.Error()) + } + } + + // COLLATE NATURAL_CS - Case sensitive natural sort + err := conn.RegisterCollation("NATURAL_CS", func(s string, s2 string) int { + if sortorder.NaturalLess(s, s2) { + return -1 + } else { + return 1 + } + }) + + if err != nil { + return fmt.Errorf("error registering natural sort collation: %v", err) + } + + return nil + }, + }, + ) +} diff --git a/pkg/sqlite/file.go b/pkg/sqlite/file.go new file mode 100644 index 000000000..7bfcd7804 --- /dev/null +++ b/pkg/sqlite/file.go @@ -0,0 +1,848 @@ +package sqlite + +import ( + "context" + "database/sql" + "errors" + "fmt" + "path/filepath" + "strings" + "time" + + "github.com/doug-martin/goqu/v9" + "github.com/doug-martin/goqu/v9/exp" + "github.com/jmoiron/sqlx" + "github.com/stashapp/stash/pkg/file" + "github.com/stashapp/stash/pkg/models" + "gopkg.in/guregu/null.v4" +) + +const ( + fileTable = "files" + videoFileTable = "video_files" + imageFileTable = "image_files" + fileIDColumn = "file_id" + + videoCaptionsTable = "video_captions" + captionCodeColumn = "language_code" + captionFilenameColumn = "filename" + captionTypeColumn = "caption_type" +) + +type basicFileRow struct { + ID file.ID `db:"id" goqu:"skipinsert"` + Basename string `db:"basename"` + ZipFileID null.Int `db:"zip_file_id"` + ParentFolderID file.FolderID `db:"parent_folder_id"` + Size int64 `db:"size"` + ModTime time.Time `db:"mod_time"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} + +func (r *basicFileRow) fromBasicFile(o file.BaseFile) { + r.ID = o.ID + r.Basename = o.Basename + r.ZipFileID = nullIntFromFileIDPtr(o.ZipFileID) + r.ParentFolderID = o.ParentFolderID + r.Size = o.Size + r.ModTime = o.ModTime + r.CreatedAt = o.CreatedAt + r.UpdatedAt = o.UpdatedAt +} + +type videoFileRow struct { + FileID file.ID `db:"file_id"` + Format string `db:"format"` + Width int `db:"width"` + Height int `db:"height"` + Duration float64 `db:"duration"` + VideoCodec string `db:"video_codec"` + AudioCodec string `db:"audio_codec"` + FrameRate float64 `db:"frame_rate"` + BitRate int64 `db:"bit_rate"` + Interactive bool `db:"interactive"` + InteractiveSpeed null.Int `db:"interactive_speed"` +} + +func (f *videoFileRow) fromVideoFile(ff file.VideoFile) { + f.FileID = ff.ID + f.Format = ff.Format + f.Width = ff.Width + f.Height = ff.Height + f.Duration = ff.Duration + f.VideoCodec = ff.VideoCodec + f.AudioCodec = ff.AudioCodec + f.FrameRate = ff.FrameRate + f.BitRate = ff.BitRate + f.Interactive = ff.Interactive + 
f.InteractiveSpeed = intFromPtr(ff.InteractiveSpeed) +} + +type imageFileRow struct { + FileID file.ID `db:"file_id"` + Format string `db:"format"` + Width int `db:"width"` + Height int `db:"height"` +} + +func (f *imageFileRow) fromImageFile(ff file.ImageFile) { + f.FileID = ff.ID + f.Format = ff.Format + f.Width = ff.Width + f.Height = ff.Height +} + +// we redefine this to change the columns around +// otherwise, we collide with the image file columns +type videoFileQueryRow struct { + FileID null.Int `db:"file_id_video"` + Format null.String `db:"video_format"` + Width null.Int `db:"video_width"` + Height null.Int `db:"video_height"` + Duration null.Float `db:"duration"` + VideoCodec null.String `db:"video_codec"` + AudioCodec null.String `db:"audio_codec"` + FrameRate null.Float `db:"frame_rate"` + BitRate null.Int `db:"bit_rate"` + Interactive null.Bool `db:"interactive"` + InteractiveSpeed null.Int `db:"interactive_speed"` +} + +func (f *videoFileQueryRow) resolve() *file.VideoFile { + return &file.VideoFile{ + Format: f.Format.String, + Width: int(f.Width.Int64), + Height: int(f.Height.Int64), + Duration: f.Duration.Float64, + VideoCodec: f.VideoCodec.String, + AudioCodec: f.AudioCodec.String, + FrameRate: f.FrameRate.Float64, + BitRate: f.BitRate.Int64, + Interactive: f.Interactive.Bool, + InteractiveSpeed: nullIntPtr(f.InteractiveSpeed), + } +} + +func videoFileQueryColumns() []interface{} { + table := videoFileTableMgr.table + return []interface{}{ + table.Col("file_id").As("file_id_video"), + table.Col("format").As("video_format"), + table.Col("width").As("video_width"), + table.Col("height").As("video_height"), + table.Col("duration"), + table.Col("video_codec"), + table.Col("audio_codec"), + table.Col("frame_rate"), + table.Col("bit_rate"), + table.Col("interactive"), + table.Col("interactive_speed"), + } +} + +// we redefine this to change the columns around +// otherwise, we collide with the video file columns +type imageFileQueryRow struct { + Format null.String `db:"image_format"` + Width null.Int `db:"image_width"` + Height null.Int `db:"image_height"` +} + +func (imageFileQueryRow) columns(table *table) []interface{} { + ex := table.table + return []interface{}{ + ex.Col("format").As("image_format"), + ex.Col("width").As("image_width"), + ex.Col("height").As("image_height"), + } +} + +func (f *imageFileQueryRow) resolve() *file.ImageFile { + return &file.ImageFile{ + Format: f.Format.String, + Width: int(f.Width.Int64), + Height: int(f.Height.Int64), + } +} + +type fileQueryRow struct { + FileID null.Int `db:"file_id"` + Basename null.String `db:"basename"` + ZipFileID null.Int `db:"zip_file_id"` + ParentFolderID null.Int `db:"parent_folder_id"` + Size null.Int `db:"size"` + ModTime null.Time `db:"mod_time"` + CreatedAt null.Time `db:"file_created_at"` + UpdatedAt null.Time `db:"file_updated_at"` + + ZipBasename null.String `db:"zip_basename"` + ZipFolderPath null.String `db:"zip_folder_path"` + + FolderPath null.String `db:"parent_folder_path"` + fingerprintQueryRow + videoFileQueryRow + imageFileQueryRow +} + +func (r *fileQueryRow) resolve() file.File { + basic := &file.BaseFile{ + ID: file.ID(r.FileID.Int64), + DirEntry: file.DirEntry{ + ZipFileID: nullIntFileIDPtr(r.ZipFileID), + ModTime: r.ModTime.Time, + }, + Path: filepath.Join(r.FolderPath.String, r.Basename.String), + ParentFolderID: file.FolderID(r.ParentFolderID.Int64), + Basename: r.Basename.String, + Size: r.Size.Int64, + CreatedAt: r.CreatedAt.Time, + UpdatedAt: r.UpdatedAt.Time, + } + + if 
basic.ZipFileID != nil && r.ZipFolderPath.Valid && r.ZipBasename.Valid { + basic.ZipFile = &file.BaseFile{ + ID: *basic.ZipFileID, + Path: filepath.Join(r.ZipFolderPath.String, r.ZipBasename.String), + Basename: r.ZipBasename.String, + } + } + + var ret file.File = basic + + if r.videoFileQueryRow.Format.Valid { + vf := r.videoFileQueryRow.resolve() + vf.BaseFile = basic + ret = vf + } + + if r.imageFileQueryRow.Format.Valid { + imf := r.imageFileQueryRow.resolve() + imf.BaseFile = basic + ret = imf + } + + r.appendRelationships(basic) + + return ret +} + +func appendFingerprintsUnique(vs []file.Fingerprint, v ...file.Fingerprint) []file.Fingerprint { + for _, vv := range v { + found := false + for _, vsv := range vs { + if vsv.Type == vv.Type { + found = true + break + } + } + + if !found { + vs = append(vs, vv) + } + } + return vs +} + +func (r *fileQueryRow) appendRelationships(i *file.BaseFile) { + if r.fingerprintQueryRow.valid() { + i.Fingerprints = appendFingerprintsUnique(i.Fingerprints, r.fingerprintQueryRow.resolve()) + } +} + +type fileQueryRows []fileQueryRow + +func (r fileQueryRows) resolve() []file.File { + var ret []file.File + var last file.File + var lastID file.ID + + for _, row := range r { + if last == nil || lastID != file.ID(row.FileID.Int64) { + f := row.resolve() + last = f + lastID = file.ID(row.FileID.Int64) + ret = append(ret, last) + continue + } + + // must be merging with previous row + row.appendRelationships(last.Base()) + } + + return ret +} + +type FileStore struct { + repository + + tableMgr *table +} + +func NewFileStore() *FileStore { + return &FileStore{ + repository: repository{ + tableName: sceneTable, + idColumn: idColumn, + }, + + tableMgr: fileTableMgr, + } +} + +func (qb *FileStore) table() exp.IdentifierExpression { + return qb.tableMgr.table +} + +func (qb *FileStore) Create(ctx context.Context, f file.File) error { + var r basicFileRow + r.fromBasicFile(*f.Base()) + + id, err := qb.tableMgr.insertID(ctx, r) + if err != nil { + return err + } + + fileID := file.ID(id) + + // create extended stuff here + switch ef := f.(type) { + case *file.VideoFile: + if err := qb.createVideoFile(ctx, fileID, *ef); err != nil { + return err + } + case *file.ImageFile: + if err := qb.createImageFile(ctx, fileID, *ef); err != nil { + return err + } + } + + if err := FingerprintReaderWriter.insertJoins(ctx, fileID, f.Base().Fingerprints); err != nil { + return err + } + + updated, err := qb.Find(ctx, fileID) + if err != nil { + return fmt.Errorf("finding after create: %w", err) + } + + base := f.Base() + *base = *updated[0].Base() + + return nil +} + +func (qb *FileStore) Update(ctx context.Context, f file.File) error { + var r basicFileRow + r.fromBasicFile(*f.Base()) + + id := f.Base().ID + + if err := qb.tableMgr.updateByID(ctx, id, r); err != nil { + return err + } + + // create extended stuff here + switch ef := f.(type) { + case *file.VideoFile: + if err := qb.updateOrCreateVideoFile(ctx, id, *ef); err != nil { + return err + } + case *file.ImageFile: + if err := qb.updateOrCreateImageFile(ctx, id, *ef); err != nil { + return err + } + } + + if err := FingerprintReaderWriter.replaceJoins(ctx, id, f.Base().Fingerprints); err != nil { + return err + } + + return nil +} + +func (qb *FileStore) Destroy(ctx context.Context, id file.ID) error { + return qb.tableMgr.destroyExisting(ctx, []int{int(id)}) +} + +func (qb *FileStore) createVideoFile(ctx context.Context, id file.ID, f file.VideoFile) error { + var r videoFileRow + r.fromVideoFile(f) + r.FileID = id + 
if _, err := videoFileTableMgr.insert(ctx, r); err != nil { + return err + } + + return nil +} + +func (qb *FileStore) updateOrCreateVideoFile(ctx context.Context, id file.ID, f file.VideoFile) error { + exists, err := videoFileTableMgr.idExists(ctx, id) + if err != nil { + return err + } + + if !exists { + return qb.createVideoFile(ctx, id, f) + } + + var r videoFileRow + r.fromVideoFile(f) + r.FileID = id + if err := videoFileTableMgr.updateByID(ctx, id, r); err != nil { + return err + } + + return nil +} + +func (qb *FileStore) createImageFile(ctx context.Context, id file.ID, f file.ImageFile) error { + var r imageFileRow + r.fromImageFile(f) + r.FileID = id + if _, err := imageFileTableMgr.insert(ctx, r); err != nil { + return err + } + + return nil +} + +func (qb *FileStore) updateOrCreateImageFile(ctx context.Context, id file.ID, f file.ImageFile) error { + exists, err := imageFileTableMgr.idExists(ctx, id) + if err != nil { + return err + } + + if !exists { + return qb.createImageFile(ctx, id, f) + } + + var r imageFileRow + r.fromImageFile(f) + r.FileID = id + if err := imageFileTableMgr.updateByID(ctx, id, r); err != nil { + return err + } + + return nil +} + +func (qb *FileStore) selectDataset() *goqu.SelectDataset { + table := qb.table() + + folderTable := folderTableMgr.table + fingerprintTable := fingerprintTableMgr.table + videoFileTable := videoFileTableMgr.table + imageFileTable := imageFileTableMgr.table + + zipFileTable := table.As("zip_files") + zipFolderTable := folderTable.As("zip_files_folders") + + cols := []interface{}{ + table.Col("id").As("file_id"), + table.Col("basename"), + table.Col("zip_file_id"), + table.Col("parent_folder_id"), + table.Col("size"), + table.Col("mod_time"), + table.Col("created_at").As("file_created_at"), + table.Col("updated_at").As("file_updated_at"), + folderTable.Col("path").As("parent_folder_path"), + fingerprintTable.Col("type").As("fingerprint_type"), + fingerprintTable.Col("fingerprint"), + zipFileTable.Col("basename").As("zip_basename"), + zipFolderTable.Col("path").As("zip_folder_path"), + } + + cols = append(cols, videoFileQueryColumns()...) + cols = append(cols, imageFileQueryRow{}.columns(imageFileTableMgr)...) + + ret := dialect.From(table).Select(cols...) 
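+ // folders is inner-joined because every file has a parent folder; the fingerprint, video, image and zip tables are left-joined so that files without matching rows in them still resolve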
+ + return ret.InnerJoin( + folderTable, + goqu.On(table.Col("parent_folder_id").Eq(folderTable.Col(idColumn))), + ).LeftJoin( + fingerprintTable, + goqu.On(table.Col(idColumn).Eq(fingerprintTable.Col(fileIDColumn))), + ).LeftJoin( + videoFileTable, + goqu.On(table.Col(idColumn).Eq(videoFileTable.Col(fileIDColumn))), + ).LeftJoin( + imageFileTable, + goqu.On(table.Col(idColumn).Eq(imageFileTable.Col(fileIDColumn))), + ).LeftJoin( + zipFileTable, + goqu.On(table.Col("zip_file_id").Eq(zipFileTable.Col("id"))), + ).LeftJoin( + zipFolderTable, + goqu.On(zipFileTable.Col("parent_folder_id").Eq(zipFolderTable.Col(idColumn))), + ) +} + +func (qb *FileStore) countDataset() *goqu.SelectDataset { + table := qb.table() + + folderTable := folderTableMgr.table + fingerprintTable := fingerprintTableMgr.table + videoFileTable := videoFileTableMgr.table + imageFileTable := imageFileTableMgr.table + + zipFileTable := table.As("zip_files") + zipFolderTable := folderTable.As("zip_files_folders") + + ret := dialect.From(table).Select(goqu.COUNT(goqu.DISTINCT(table.Col("id")))) + + return ret.InnerJoin( + folderTable, + goqu.On(table.Col("parent_folder_id").Eq(folderTable.Col(idColumn))), + ).LeftJoin( + fingerprintTable, + goqu.On(table.Col(idColumn).Eq(fingerprintTable.Col(fileIDColumn))), + ).LeftJoin( + videoFileTable, + goqu.On(table.Col(idColumn).Eq(videoFileTable.Col(fileIDColumn))), + ).LeftJoin( + imageFileTable, + goqu.On(table.Col(idColumn).Eq(imageFileTable.Col(fileIDColumn))), + ).LeftJoin( + zipFileTable, + goqu.On(table.Col("zip_file_id").Eq(zipFileTable.Col("id"))), + ).LeftJoin( + zipFolderTable, + goqu.On(zipFileTable.Col("parent_folder_id").Eq(zipFolderTable.Col(idColumn))), + ) +} + +func (qb *FileStore) get(ctx context.Context, q *goqu.SelectDataset) (file.File, error) { + ret, err := qb.getMany(ctx, q) + if err != nil { + return nil, err + } + + if len(ret) == 0 { + return nil, sql.ErrNoRows + } + + return ret[0], nil +} + +func (qb *FileStore) getMany(ctx context.Context, q *goqu.SelectDataset) ([]file.File, error) { + const single = false + var rows fileQueryRows + if err := queryFunc(ctx, q, single, func(r *sqlx.Rows) error { + var f fileQueryRow + if err := r.StructScan(&f); err != nil { + return err + } + + rows = append(rows, f) + return nil + }); err != nil { + return nil, err + } + + return rows.resolve(), nil +} + +func (qb *FileStore) Find(ctx context.Context, ids ...file.ID) ([]file.File, error) { + var files []file.File + for _, id := range ids { + file, err := qb.find(ctx, id) + if err != nil { + return nil, err + } + + if file == nil { + return nil, fmt.Errorf("file with id %d not found", id) + } + + files = append(files, file) + } + + return files, nil +} + +func (qb *FileStore) find(ctx context.Context, id file.ID) (file.File, error) { + q := qb.selectDataset().Where(qb.tableMgr.byID(id)) + + ret, err := qb.get(ctx, q) + if err != nil { + return nil, fmt.Errorf("getting file by id %d: %w", id, err) + } + + return ret, nil +} + +// FindByPath returns the first file that matches the given path. Wildcard characters are supported. 
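+// For example, FindByPath(ctx, "/videos/*.mp4") returns the first file whose +// parent folder path matches /videos and whose basename matches *.mp4; the +// asterisks are translated into SQL LIKE wildcards below.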
+func (qb *FileStore) FindByPath(ctx context.Context, p string) (file.File, error) { + // separate basename from path + basename := filepath.Base(p) + dirName := filepath.Dir(p) + + // replace wildcards + basename = strings.ReplaceAll(basename, "*", "%") + dirName = strings.ReplaceAll(dirName, "*", "%") + + table := qb.table() + folderTable := folderTableMgr.table + + q := qb.selectDataset().Prepared(true).Where( + folderTable.Col("path").Like(dirName), + table.Col("basename").Like(basename), + ) + + ret, err := qb.get(ctx, q) + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("getting file by path %s: %w", p, err) + } + + return ret, nil +} + +func (qb *FileStore) allInPaths(q *goqu.SelectDataset, p []string) *goqu.SelectDataset { + folderTable := folderTableMgr.table + + var conds []exp.Expression + for _, pp := range p { + ppWildcard := pp + string(filepath.Separator) + "%" + + conds = append(conds, folderTable.Col("path").Eq(pp), folderTable.Col("path").Like(ppWildcard)) + } + + return q.Where( + goqu.Or(conds...), + ) +} + +// FindAllInPaths returns all files that are within any of the given paths. +// Returns all matching files if limit is < 0. +// Returns all files if p is empty. +func (qb *FileStore) FindAllInPaths(ctx context.Context, p []string, limit, offset int) ([]file.File, error) { + table := qb.table() + folderTable := folderTableMgr.table + + q := dialect.From(table).Prepared(true).InnerJoin( + folderTable, + goqu.On(table.Col("parent_folder_id").Eq(folderTable.Col(idColumn))), + ).Select(table.Col(idColumn)) + + q = qb.allInPaths(q, p) + + if limit > -1 { + q = q.Limit(uint(limit)) + } + + q = q.Offset(uint(offset)) + + ret, err := qb.findBySubquery(ctx, q) + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("getting files by path %s: %w", p, err) + } + + return ret, nil +} + +// CountAllInPaths returns a count of all files that are within any of the given paths. +// Returns count of all files if p is empty. 
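+// Matching follows the same exact-or-prefix path semantics as FindAllInPaths.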
+func (qb *FileStore) CountAllInPaths(ctx context.Context, p []string) (int, error) { + q := qb.countDataset().Prepared(true) + q = qb.allInPaths(q, p) + + return count(ctx, q) +} + +func (qb *FileStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]file.File, error) { + table := qb.table() + + q := qb.selectDataset().Prepared(true).Where( + table.Col(idColumn).Eq( + sq, + ), + ) + + return qb.getMany(ctx, q) +} + +func (qb *FileStore) FindByFingerprint(ctx context.Context, fp file.Fingerprint) ([]file.File, error) { + fingerprintTable := fingerprintTableMgr.table + + fingerprints := fingerprintTable.As("fp") + + sq := dialect.From(fingerprints).Select(fingerprints.Col(fileIDColumn)).Where( + fingerprints.Col("type").Eq(fp.Type), + fingerprints.Col("fingerprint").Eq(fp.Fingerprint), + ) + + return qb.findBySubquery(ctx, sq) +} + +func (qb *FileStore) FindByZipFileID(ctx context.Context, zipFileID file.ID) ([]file.File, error) { + table := qb.table() + + q := qb.selectDataset().Prepared(true).Where( + table.Col("zip_file_id").Eq(zipFileID), + ) + + return qb.getMany(ctx, q) +} + +func (qb *FileStore) validateFilter(fileFilter *models.FileFilterType) error { + const and = "AND" + const or = "OR" + const not = "NOT" + + if fileFilter.And != nil { + if fileFilter.Or != nil { + return illegalFilterCombination(and, or) + } + if fileFilter.Not != nil { + return illegalFilterCombination(and, not) + } + + return qb.validateFilter(fileFilter.And) + } + + if fileFilter.Or != nil { + if fileFilter.Not != nil { + return illegalFilterCombination(or, not) + } + + return qb.validateFilter(fileFilter.Or) + } + + if fileFilter.Not != nil { + return qb.validateFilter(fileFilter.Not) + } + + return nil +} + +func (qb *FileStore) makeFilter(ctx context.Context, fileFilter *models.FileFilterType) *filterBuilder { + query := &filterBuilder{} + + if fileFilter.And != nil { + query.and(qb.makeFilter(ctx, fileFilter.And)) + } + if fileFilter.Or != nil { + query.or(qb.makeFilter(ctx, fileFilter.Or)) + } + if fileFilter.Not != nil { + query.not(qb.makeFilter(ctx, fileFilter.Not)) + } + + query.handleCriterion(ctx, pathCriterionHandler(fileFilter.Path, "folders.path", "files.basename", nil)) + + return query +} + +func (qb *FileStore) Query(ctx context.Context, options models.FileQueryOptions) (*models.FileQueryResult, error) { + fileFilter := options.FileFilter + findFilter := options.FindFilter + + if fileFilter == nil { + fileFilter = &models.FileFilterType{} + } + if findFilter == nil { + findFilter = &models.FindFilterType{} + } + + query := qb.newQuery() + query.join(folderTable, "", "files.parent_folder_id = folders.id") + + distinctIDs(&query, fileTable) + + if q := findFilter.Q; q != nil && *q != "" { + searchColumns := []string{"folders.path", "files.basename"} + query.parseQueryString(searchColumns, *q) + } + + if err := qb.validateFilter(fileFilter); err != nil { + return nil, err + } + filter := qb.makeFilter(ctx, fileFilter) + + query.addFilter(filter) + + qb.setQuerySort(&query, findFilter) + query.sortAndPagination += getPagination(findFilter) + + result, err := qb.queryGroupedFields(ctx, options, query) + if err != nil { + return nil, fmt.Errorf("error querying aggregate fields: %w", err) + } + + idsResult, err := query.findIDs(ctx) + if err != nil { + return nil, fmt.Errorf("error finding IDs: %w", err) + } + + result.IDs = make([]file.ID, len(idsResult)) + for i, id := range idsResult { + result.IDs[i] = file.ID(id) + } + + return result, nil +} + +func (qb *FileStore) 
queryGroupedFields(ctx context.Context, options models.FileQueryOptions, query queryBuilder) (*models.FileQueryResult, error) { + if !options.Count { + // nothing to do - return empty result + return models.NewFileQueryResult(qb), nil + } + + aggregateQuery := qb.newQuery() + + if options.Count { + aggregateQuery.addColumn("COUNT(temp.id) as total") + } + + const includeSortPagination = false + aggregateQuery.from = fmt.Sprintf("(%s) as temp", query.toSQL(includeSortPagination)) + + out := struct { + Total int + }{} + if err := qb.repository.queryStruct(ctx, aggregateQuery.toSQL(includeSortPagination), query.args, &out); err != nil { + return nil, err + } + + ret := models.NewFileQueryResult(qb) + ret.Count = out.Total + + return ret, nil +} + +func (qb *FileStore) setQuerySort(query *queryBuilder, findFilter *models.FindFilterType) { + if findFilter == nil || findFilter.Sort == nil || *findFilter.Sort == "" { + return + } + sort := findFilter.GetSort("path") + + direction := findFilter.GetDirection() + switch sort { + case "path": + // special handling for path + query.sortAndPagination += fmt.Sprintf(" ORDER BY folders.path %s, files.basename %[1]s", direction) + default: + query.sortAndPagination += getSort(sort, direction, "files") + } +} + +func (qb *FileStore) captionRepository() *captionRepository { + return &captionRepository{ + repository: repository{ + tx: qb.tx, + tableName: videoCaptionsTable, + idColumn: fileIDColumn, + }, + } +} + +func (qb *FileStore) GetCaptions(ctx context.Context, fileID file.ID) ([]*models.VideoCaption, error) { + return qb.captionRepository().get(ctx, fileID) +} + +func (qb *FileStore) UpdateCaptions(ctx context.Context, fileID file.ID, captions []*models.VideoCaption) error { + return qb.captionRepository().replace(ctx, fileID, captions) +} diff --git a/pkg/sqlite/file_test.go b/pkg/sqlite/file_test.go new file mode 100644 index 000000000..0c6deae56 --- /dev/null +++ b/pkg/sqlite/file_test.go @@ -0,0 +1,615 @@ +//go:build integration +// +build integration + +package sqlite_test + +import ( + "context" + "path/filepath" + "testing" + "time" + + "github.com/stashapp/stash/pkg/file" + "github.com/stretchr/testify/assert" +) + +func getFilePath(folderIdx int, basename string) string { + return filepath.Join(folderPaths[folderIdx], basename) +} + +func makeZipFileWithID(index int) file.File { + f := makeFile(index) + + return &file.BaseFile{ + ID: fileIDs[index], + Basename: f.Base().Basename, + Path: getFilePath(fileFolders[index], getFileBaseName(index)), + } +} + +func Test_fileFileStore_Create(t *testing.T) { + var ( + basename = "basename" + fingerprintType = "MD5" + fingerprintValue = "checksum" + fileModTime = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + createdAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + updatedAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + size int64 = 1234 + + duration = 1.234 + width = 640 + height = 480 + framerate = 2.345 + bitrate int64 = 234 + videoCodec = "videoCodec" + audioCodec = "audioCodec" + format = "format" + ) + + tests := []struct { + name string + newObject file.File + wantErr bool + }{ + { + "full", + &file.BaseFile{ + DirEntry: file.DirEntry{ + ZipFileID: &fileIDs[fileIdxZip], + ZipFile: makeZipFileWithID(fileIdxZip), + ModTime: fileModTime, + }, + Path: getFilePath(folderIdxWithFiles, basename), + ParentFolderID: folderIDs[folderIdxWithFiles], + Basename: basename, + Size: size, + Fingerprints: []file.Fingerprint{ + { + Type: fingerprintType, + Fingerprint: fingerprintValue, + }, + }, + CreatedAt: 
createdAt, + UpdatedAt: updatedAt, + }, + false, + }, + { + "video file", + &file.VideoFile{ + BaseFile: &file.BaseFile{ + DirEntry: file.DirEntry{ + ZipFileID: &fileIDs[fileIdxZip], + ZipFile: makeZipFileWithID(fileIdxZip), + ModTime: fileModTime, + }, + Path: getFilePath(folderIdxWithFiles, basename), + ParentFolderID: folderIDs[folderIdxWithFiles], + Basename: basename, + Size: size, + Fingerprints: []file.Fingerprint{ + { + Type: fingerprintType, + Fingerprint: fingerprintValue, + }, + }, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + Duration: duration, + VideoCodec: videoCodec, + AudioCodec: audioCodec, + Format: format, + Width: width, + Height: height, + FrameRate: framerate, + BitRate: bitrate, + }, + false, + }, + { + "image file", + &file.ImageFile{ + BaseFile: &file.BaseFile{ + DirEntry: file.DirEntry{ + ZipFileID: &fileIDs[fileIdxZip], + ZipFile: makeZipFileWithID(fileIdxZip), + ModTime: fileModTime, + }, + Path: getFilePath(folderIdxWithFiles, basename), + ParentFolderID: folderIDs[folderIdxWithFiles], + Basename: basename, + Size: size, + Fingerprints: []file.Fingerprint{ + { + Type: fingerprintType, + Fingerprint: fingerprintValue, + }, + }, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + Format: format, + Width: width, + Height: height, + }, + false, + }, + { + "duplicate path", + &file.BaseFile{ + DirEntry: file.DirEntry{ + ModTime: fileModTime, + }, + Path: getFilePath(folderIdxWithFiles, getFileBaseName(fileIdxZip)), + ParentFolderID: folderIDs[folderIdxWithFiles], + Basename: getFileBaseName(fileIdxZip), + Size: size, + Fingerprints: []file.Fingerprint{ + { + Type: fingerprintType, + Fingerprint: fingerprintValue, + }, + }, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + true, + }, + { + "empty basename", + &file.BaseFile{ + ParentFolderID: folderIDs[folderIdxWithFiles], + }, + true, + }, + { + "missing folder id", + &file.BaseFile{ + Basename: basename, + }, + true, + }, + { + "invalid folder id", + &file.BaseFile{ + DirEntry: file.DirEntry{}, + ParentFolderID: invalidFolderID, + Basename: basename, + }, + true, + }, + { + "invalid zip file id", + &file.BaseFile{ + DirEntry: file.DirEntry{ + ZipFileID: &invalidFileID, + }, + Basename: basename, + }, + true, + }, + } + + qb := db.File + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + + s := tt.newObject + if err := qb.Create(ctx, s); (err != nil) != tt.wantErr { + t.Errorf("fileStore.Create() error = %v, wantErr = %v", err, tt.wantErr) + } + + if tt.wantErr { + assert.Zero(s.Base().ID) + return + } + + assert.NotZero(s.Base().ID) + + var copy file.File + switch t := s.(type) { + case *file.BaseFile: + v := *t + copy = &v + case *file.VideoFile: + v := *t + copy = &v + case *file.ImageFile: + v := *t + copy = &v + } + + copy.Base().ID = s.Base().ID + + assert.Equal(copy, s) + + // ensure can find the file + found, err := qb.Find(ctx, s.Base().ID) + if err != nil { + t.Errorf("fileStore.Find() error = %v", err) + } + + if !assert.Len(found, 1) { + return + } + + assert.Equal(copy, found[0]) + + return + }) + } +} + +func Test_fileStore_Update(t *testing.T) { + var ( + basename = "basename" + fingerprintType = "MD5" + fingerprintValue = "checksum" + fileModTime = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + createdAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + updatedAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + size int64 = 1234 + + duration = 1.234 + width = 640 + height = 480 + framerate = 2.345 + 
bitrate int64 = 234 + videoCodec = "videoCodec" + audioCodec = "audioCodec" + format = "format" + ) + + tests := []struct { + name string + updatedObject file.File + wantErr bool + }{ + { + "full", + &file.BaseFile{ + ID: fileIDs[fileIdxInZip], + DirEntry: file.DirEntry{ + ZipFileID: &fileIDs[fileIdxZip], + ZipFile: makeZipFileWithID(fileIdxZip), + ModTime: fileModTime, + }, + Path: getFilePath(folderIdxWithFiles, basename), + ParentFolderID: folderIDs[folderIdxWithFiles], + Basename: basename, + Size: size, + Fingerprints: []file.Fingerprint{ + { + Type: fingerprintType, + Fingerprint: fingerprintValue, + }, + }, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + false, + }, + { + "video file", + &file.VideoFile{ + BaseFile: &file.BaseFile{ + ID: fileIDs[fileIdxStartVideoFiles], + DirEntry: file.DirEntry{ + ZipFileID: &fileIDs[fileIdxZip], + ZipFile: makeZipFileWithID(fileIdxZip), + ModTime: fileModTime, + }, + Path: getFilePath(folderIdxWithFiles, basename), + ParentFolderID: folderIDs[folderIdxWithFiles], + Basename: basename, + Size: size, + Fingerprints: []file.Fingerprint{ + { + Type: fingerprintType, + Fingerprint: fingerprintValue, + }, + }, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + Duration: duration, + VideoCodec: videoCodec, + AudioCodec: audioCodec, + Format: format, + Width: width, + Height: height, + FrameRate: framerate, + BitRate: bitrate, + }, + false, + }, + { + "image file", + &file.ImageFile{ + BaseFile: &file.BaseFile{ + ID: fileIDs[fileIdxStartImageFiles], + DirEntry: file.DirEntry{ + ZipFileID: &fileIDs[fileIdxZip], + ZipFile: makeZipFileWithID(fileIdxZip), + ModTime: fileModTime, + }, + Path: getFilePath(folderIdxWithFiles, basename), + ParentFolderID: folderIDs[folderIdxWithFiles], + Basename: basename, + Size: size, + Fingerprints: []file.Fingerprint{ + { + Type: fingerprintType, + Fingerprint: fingerprintValue, + }, + }, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + Format: format, + Width: width, + Height: height, + }, + false, + }, + { + "duplicate path", + &file.BaseFile{ + ID: fileIDs[fileIdxInZip], + DirEntry: file.DirEntry{ + ModTime: fileModTime, + }, + Path: getFilePath(folderIdxWithFiles, getFileBaseName(fileIdxZip)), + ParentFolderID: folderIDs[folderIdxWithFiles], + Basename: getFileBaseName(fileIdxZip), + Size: size, + Fingerprints: []file.Fingerprint{ + { + Type: fingerprintType, + Fingerprint: fingerprintValue, + }, + }, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + true, + }, + { + "clear zip", + &file.BaseFile{ + ID: fileIDs[fileIdxInZip], + Path: getFilePath(folderIdxWithFiles, getFileBaseName(fileIdxZip)+".renamed"), + Basename: getFileBaseName(fileIdxZip) + ".renamed", + ParentFolderID: folderIDs[folderIdxWithFiles], + }, + false, + }, + { + "clear folder", + &file.BaseFile{ + ID: fileIDs[fileIdxZip], + Path: basename, + }, + true, + }, + { + "invalid parent folder id", + &file.BaseFile{ + ID: fileIDs[fileIdxZip], + Path: basename, + ParentFolderID: invalidFolderID, + }, + true, + }, + { + "invalid zip file id", + &file.BaseFile{ + ID: fileIDs[fileIdxZip], + Path: basename, + DirEntry: file.DirEntry{ + ZipFileID: &invalidFileID, + }, + ParentFolderID: folderIDs[folderIdxWithFiles], + }, + true, + }, + } + + qb := db.File + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + + copy := tt.updatedObject + + if err := qb.Update(ctx, tt.updatedObject); (err != nil) != tt.wantErr { + t.Errorf("FileStore.Update() error = %v, wantErr %v", 
err, tt.wantErr) + } + + if tt.wantErr { + return + } + + s, err := qb.Find(ctx, tt.updatedObject.Base().ID) + if err != nil { + t.Errorf("FileStore.Find() error = %v", err) + } + + if !assert.Len(s, 1) { + return + } + + assert.Equal(copy, s[0]) + + return + }) + } +} + +func makeFileWithID(index int) file.File { + ret := makeFile(index) + ret.Base().Path = getFilePath(fileFolders[index], getFileBaseName(index)) + ret.Base().ID = fileIDs[index] + + return ret +} + +func Test_fileStore_Find(t *testing.T) { + tests := []struct { + name string + id file.ID + want file.File + wantErr bool + }{ + { + "valid", + fileIDs[fileIdxZip], + makeFileWithID(fileIdxZip), + false, + }, + { + "invalid", + file.ID(invalidID), + nil, + true, + }, + { + "video file", + fileIDs[fileIdxStartVideoFiles], + makeFileWithID(fileIdxStartVideoFiles), + false, + }, + { + "image file", + fileIDs[fileIdxStartImageFiles], + makeFileWithID(fileIdxStartImageFiles), + false, + }, + } + + qb := db.File + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + got, err := qb.Find(ctx, tt.id) + if (err != nil) != tt.wantErr { + t.Errorf("fileStore.Find() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if tt.want == nil { + assert.Len(got, 0) + return + } + + if !assert.Len(got, 1) { + return + } + + assert.Equal(tt.want, got[0]) + }) + } +} + +func Test_FileStore_FindByPath(t *testing.T) { + getPath := func(index int) string { + folderIdx, found := fileFolders[index] + if !found { + folderIdx = folderIdxWithFiles + } + + return getFilePath(folderIdx, getFileBaseName(index)) + } + + tests := []struct { + name string + path string + want file.File + wantErr bool + }{ + { + "valid", + getPath(fileIdxZip), + makeFileWithID(fileIdxZip), + false, + }, + { + "invalid", + "invalid path", + nil, + false, + }, + } + + qb := db.File + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + got, err := qb.FindByPath(ctx, tt.path) + if (err != nil) != tt.wantErr { + t.Errorf("FileStore.FindByPath() error = %v, wantErr %v", err, tt.wantErr) + return + } + + assert.Equal(tt.want, got) + }) + } +} + +func TestFileStore_FindByFingerprint(t *testing.T) { + tests := []struct { + name string + fp file.Fingerprint + want []file.File + wantErr bool + }{ + { + "by MD5", + file.Fingerprint{ + Type: "MD5", + Fingerprint: getPrefixedStringValue("file", fileIdxZip, "md5"), + }, + []file.File{makeFileWithID(fileIdxZip)}, + false, + }, + { + "by OSHASH", + file.Fingerprint{ + Type: "OSHASH", + Fingerprint: getPrefixedStringValue("file", fileIdxZip, "oshash"), + }, + []file.File{makeFileWithID(fileIdxZip)}, + false, + }, + { + "non-existing", + file.Fingerprint{ + Type: "OSHASH", + Fingerprint: "foo", + }, + nil, + false, + }, + } + + qb := db.File + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + got, err := qb.FindByFingerprint(ctx, tt.fp) + if (err != nil) != tt.wantErr { + t.Errorf("FileStore.FindByFingerprint() error = %v, wantErr %v", err, tt.wantErr) + return + } + + assert.Equal(tt.want, got) + }) + } +} diff --git a/pkg/sqlite/filter.go b/pkg/sqlite/filter.go index 79af98efd..5f5cc8966 100644 --- a/pkg/sqlite/filter.go +++ b/pkg/sqlite/filter.go @@ -1,8 +1,10 @@ package sqlite import ( + "context" "errors" "fmt" + "path/filepath" "regexp" "strconv" "strings" @@ -18,6 +20,13 @@ type sqlClause struct { 
args []interface{} } +func (c sqlClause) not() sqlClause { + return sqlClause{ + sql: "NOT (" + c.sql + ")", + args: c.args, + } +} + func makeClause(sql string, args ...interface{}) sqlClause { return sqlClause{ sql: sql, @@ -25,14 +34,26 @@ func makeClause(sql string, args ...interface{}) sqlClause { } } -type criterionHandler interface { - handle(f *filterBuilder) +func orClauses(clauses ...sqlClause) sqlClause { + var ret []string + var args []interface{} + + for _, clause := range clauses { + ret = append(ret, "("+clause.sql+")") + args = append(args, clause.args...) + } + + return sqlClause{sql: strings.Join(ret, " OR "), args: args} } -type criterionHandlerFunc func(f *filterBuilder) +type criterionHandler interface { + handle(ctx context.Context, f *filterBuilder) +} -func (h criterionHandlerFunc) handle(f *filterBuilder) { - h(f) +type criterionHandlerFunc func(ctx context.Context, f *filterBuilder) + +func (h criterionHandlerFunc) handle(ctx context.Context, f *filterBuilder) { + h(ctx, f) } type join struct { @@ -331,8 +352,8 @@ func (f *filterBuilder) getError() error { // handleCriterion calls the handle function on the provided criterionHandler, // providing itself. -func (f *filterBuilder) handleCriterion(handler criterionHandler) { - handler.handle(f) +func (f *filterBuilder) handleCriterion(ctx context.Context, handler criterionHandler) { + handler.handle(ctx, f) } func (f *filterBuilder) setError(e error) { @@ -361,16 +382,14 @@ func (f *filterBuilder) andClauses(input []sqlClause) (string, []interface{}) { } func stringCriterionHandler(c *models.StringCriterionInput, column string) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if c != nil { if modifier := c.Modifier; c.Modifier.IsValid() { switch modifier { case models.CriterionModifierIncludes: - clause, thisArgs := getSearchBinding([]string{column}, c.Value, false) - f.addWhere(clause, thisArgs...) + f.whereClauses = append(f.whereClauses, getStringSearchClause([]string{column}, c.Value, false)) case models.CriterionModifierExcludes: - clause, thisArgs := getSearchBinding([]string{column}, c.Value, true) - f.addWhere(clause, thisArgs...) 
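+ // getStringSearchClause returns a ready-made sqlClause, so it is appended directly along with its arguments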
+ f.whereClauses = append(f.whereClauses, getStringSearchClause([]string{column}, c.Value, true)) case models.CriterionModifierEquals: f.addWhere(column+" LIKE ?", c.Value) case models.CriterionModifierNotEquals: @@ -399,8 +418,104 @@ func stringCriterionHandler(c *models.StringCriterionInput, column string) crite } } -func intCriterionHandler(c *models.IntCriterionInput, column string) criterionHandlerFunc { - return func(f *filterBuilder) { +func pathCriterionHandler(c *models.StringCriterionInput, pathColumn string, basenameColumn string, addJoinFn func(f *filterBuilder)) criterionHandlerFunc { + return func(ctx context.Context, f *filterBuilder) { + if c != nil { + if addJoinFn != nil { + addJoinFn(f) + } + addWildcards := true + not := false + + if modifier := c.Modifier; c.Modifier.IsValid() { + switch modifier { + case models.CriterionModifierIncludes: + f.whereClauses = append(f.whereClauses, getPathSearchClause(pathColumn, basenameColumn, c.Value, addWildcards, not)) + case models.CriterionModifierExcludes: + not = true + f.whereClauses = append(f.whereClauses, getPathSearchClause(pathColumn, basenameColumn, c.Value, addWildcards, not)) + case models.CriterionModifierEquals: + addWildcards = false + f.whereClauses = append(f.whereClauses, getPathSearchClause(pathColumn, basenameColumn, c.Value, addWildcards, not)) + case models.CriterionModifierNotEquals: + addWildcards = false + not = true + f.whereClauses = append(f.whereClauses, getPathSearchClause(pathColumn, basenameColumn, c.Value, addWildcards, not)) + case models.CriterionModifierMatchesRegex: + if _, err := regexp.Compile(c.Value); err != nil { + f.setError(err) + return + } + f.addWhere(fmt.Sprintf("%s IS NOT NULL AND %s IS NOT NULL AND %[1]s || '%[3]s' || %[2]s regexp ?", pathColumn, basenameColumn, string(filepath.Separator)), c.Value) + case models.CriterionModifierNotMatchesRegex: + if _, err := regexp.Compile(c.Value); err != nil { + f.setError(err) + return + } + f.addWhere(fmt.Sprintf("%s IS NULL OR %s IS NULL OR %[1]s || '%[3]s' || %[2]s NOT regexp ?", pathColumn, basenameColumn, string(filepath.Separator)), c.Value) + case models.CriterionModifierIsNull: + f.addWhere(fmt.Sprintf("(%s IS NULL OR TRIM(%[1]s) = '' OR %s IS NULL OR TRIM(%[2]s) = '')", pathColumn, basenameColumn)) + case models.CriterionModifierNotNull: + f.addWhere(fmt.Sprintf("(%s IS NOT NULL AND TRIM(%[1]s) != '' AND %s IS NOT NULL AND TRIM(%[2]s) != '')", pathColumn, basenameColumn)) + default: + panic("unsupported string filter modifier") + } + } + } + } +} + +func getPathSearchClause(pathColumn, basenameColumn, p string, addWildcards, not bool) sqlClause { + // if path value has slashes, then we're potentially searching directory only or + // directory plus basename + hasSlashes := strings.Contains(p, string(filepath.Separator)) + trailingSlash := hasSlashes && p[len(p)-1] == filepath.Separator + const emptyDir = string(filepath.Separator) + + // possible values: + // dir/basename + // dir1/subdir + // dir/ + // /basename + // dirOrBasename + + basename := filepath.Base(p) + dir := filepath.Dir(p) + + if addWildcards { + p = "%" + p + "%" + basename += "%" + dir = "%" + dir + } + + var ret sqlClause + + switch { + case !hasSlashes: + // dir or basename + ret = makeClause(fmt.Sprintf("%s LIKE ? OR %s LIKE ?", pathColumn, basenameColumn), p, p) + case dir != emptyDir && !trailingSlash: + // (path like %dir AND basename like basename%) OR path like %p% + c1 := makeClause(fmt.Sprintf("%s LIKE ? 
AND %s LIKE ?", pathColumn, basenameColumn), dir, basename) + c2 := makeClause(fmt.Sprintf("%s LIKE ?", pathColumn), p) + ret = orClauses(c1, c2) + case dir == emptyDir && !trailingSlash: + // path like %p% OR basename like basename% + ret = makeClause(fmt.Sprintf("%s LIKE ? OR %s LIKE ?", pathColumn, basenameColumn), p, basename) + case dir != emptyDir && trailingSlash: + // path like %p% OR path like %dir + ret = makeClause(fmt.Sprintf("%s LIKE ? OR %[1]s LIKE ?", pathColumn), p, dir) + } + + if not { + ret = ret.not() + } + + return ret +} + +func intCriterionHandler(c *models.IntCriterionInput, column string, addJoinFn func(f *filterBuilder)) criterionHandlerFunc { + return func(ctx context.Context, f *filterBuilder) { if c != nil { clause, args := getIntCriterionWhereClause(column, *c) f.addWhere(clause, args...) @@ -408,9 +523,12 @@ func intCriterionHandler(c *models.IntCriterionInput, column string) criterionHa } } -func boolCriterionHandler(c *bool, column string) criterionHandlerFunc { - return func(f *filterBuilder) { +func boolCriterionHandler(c *bool, column string, addJoinFn func(f *filterBuilder)) criterionHandlerFunc { + return func(ctx context.Context, f *filterBuilder) { if c != nil { + if addJoinFn != nil { + addJoinFn(f) + } var v string if *c { v = "1" @@ -441,7 +559,7 @@ type joinedMultiCriterionHandlerBuilder struct { } func (m *joinedMultiCriterionHandlerBuilder) handler(criterion *models.MultiCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if criterion != nil { joinAlias := m.joinAs if joinAlias == "" { @@ -511,7 +629,7 @@ type multiCriterionHandlerBuilder struct { } func (m *multiCriterionHandlerBuilder) handler(criterion *models.MultiCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if criterion != nil { if criterion.Modifier == models.CriterionModifierIsNull || criterion.Modifier == models.CriterionModifierNotNull { var notClause string @@ -556,7 +674,7 @@ type countCriterionHandlerBuilder struct { } func (m *countCriterionHandlerBuilder) handler(criterion *models.IntCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if criterion != nil { clause, args := getCountCriterionClause(m.primaryTable, m.joinTable, m.primaryFK, *criterion) @@ -576,17 +694,17 @@ type stringListCriterionHandlerBuilder struct { } func (m *stringListCriterionHandlerBuilder) handler(criterion *models.StringCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if criterion != nil && len(criterion.Value) > 0 { m.addJoinTable(f) - stringCriterionHandler(criterion, m.joinTable+"."+m.stringColumn)(f) + stringCriterionHandler(criterion, m.joinTable+"."+m.stringColumn)(ctx, f) } } } type hierarchicalMultiCriterionHandlerBuilder struct { - tx dbi + tx dbWrapper primaryTable string foreignTable string @@ -597,7 +715,7 @@ type hierarchicalMultiCriterionHandlerBuilder struct { relationsTable string } -func getHierarchicalValues(tx dbi, values []string, table, relationsTable, parentFK string, depth *int) string { +func getHierarchicalValues(ctx context.Context, tx dbWrapper, values []string, table, relationsTable, parentFK string, depth *int) string { var args []interface{} depthVal := 0 @@ -670,7 +788,7 @@ WHERE id in {inBinding} query := fmt.Sprintf("WITH RECURSIVE %s SELECT 'VALUES' || 
GROUP_CONCAT('(' || root_id || ', ' || item_id || ')') AS val FROM items", withClause) var valuesClause string - err := tx.Get(&valuesClause, query, args...) + err := tx.Get(ctx, &valuesClause, query, args...) if err != nil { logger.Error(err) // return record which never matches so we don't have to handle error here @@ -693,7 +811,7 @@ func addHierarchicalConditionClauses(f *filterBuilder, criterion *models.Hierarc } func (m *hierarchicalMultiCriterionHandlerBuilder) handler(criterion *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if criterion != nil { if criterion.Modifier == models.CriterionModifierIsNull || criterion.Modifier == models.CriterionModifierNotNull { var notClause string @@ -713,7 +831,7 @@ func (m *hierarchicalMultiCriterionHandlerBuilder) handler(criterion *models.Hie return } - valuesClause := getHierarchicalValues(m.tx, criterion.Value, m.foreignTable, m.relationsTable, m.parentFK, criterion.Depth) + valuesClause := getHierarchicalValues(ctx, m.tx, criterion.Value, m.foreignTable, m.relationsTable, m.parentFK, criterion.Depth) f.addLeftJoin("(SELECT column1 AS root_id, column2 AS item_id FROM ("+valuesClause+"))", m.derivedTable, fmt.Sprintf("%s.item_id = %s.%s", m.derivedTable, m.primaryTable, m.foreignFK)) @@ -723,7 +841,7 @@ func (m *hierarchicalMultiCriterionHandlerBuilder) handler(criterion *models.Hie } type joinedHierarchicalMultiCriterionHandlerBuilder struct { - tx dbi + tx dbWrapper primaryTable string foreignTable string @@ -738,7 +856,7 @@ type joinedHierarchicalMultiCriterionHandlerBuilder struct { } func (m *joinedHierarchicalMultiCriterionHandlerBuilder) handler(criterion *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if criterion != nil { joinAlias := m.joinAs @@ -762,7 +880,7 @@ func (m *joinedHierarchicalMultiCriterionHandlerBuilder) handler(criterion *mode return } - valuesClause := getHierarchicalValues(m.tx, criterion.Value, m.foreignTable, m.relationsTable, m.parentFK, criterion.Depth) + valuesClause := getHierarchicalValues(ctx, m.tx, criterion.Value, m.foreignTable, m.relationsTable, m.parentFK, criterion.Depth) joinTable := utils.StrFormat(`( SELECT j.*, d.column1 AS root_id, d.column2 AS item_id FROM {joinTable} AS j diff --git a/pkg/sqlite/filter_internal_test.go b/pkg/sqlite/filter_internal_test.go index e9f173de0..f416b661c 100644 --- a/pkg/sqlite/filter_internal_test.go +++ b/pkg/sqlite/filter_internal_test.go @@ -1,6 +1,7 @@ package sqlite import ( + "context" "errors" "fmt" "testing" @@ -9,6 +10,8 @@ import ( "github.com/stretchr/testify/assert" ) +var testCtx = context.Background() + func TestJoinsAddJoin(t *testing.T) { var joins joins @@ -462,7 +465,7 @@ func TestStringCriterionHandlerIncludes(t *testing.T) { const quotedValue = `"two words"` f := &filterBuilder{} - f.handleCriterion(stringCriterionHandler(&models.StringCriterionInput{ + f.handleCriterion(testCtx, stringCriterionHandler(&models.StringCriterionInput{ Modifier: models.CriterionModifierIncludes, Value: value1, }, column)) @@ -474,7 +477,7 @@ func TestStringCriterionHandlerIncludes(t *testing.T) { assert.Equal("%words%", f.whereClauses[0].args[1]) f = &filterBuilder{} - f.handleCriterion(stringCriterionHandler(&models.StringCriterionInput{ + f.handleCriterion(testCtx, stringCriterionHandler(&models.StringCriterionInput{ Modifier: models.CriterionModifierIncludes, 
Value: quotedValue, }, column)) @@ -493,7 +496,7 @@ func TestStringCriterionHandlerExcludes(t *testing.T) { const quotedValue = `"two words"` f := &filterBuilder{} - f.handleCriterion(stringCriterionHandler(&models.StringCriterionInput{ + f.handleCriterion(testCtx, stringCriterionHandler(&models.StringCriterionInput{ Modifier: models.CriterionModifierExcludes, Value: value1, }, column)) @@ -505,7 +508,7 @@ func TestStringCriterionHandlerExcludes(t *testing.T) { assert.Equal("%words%", f.whereClauses[0].args[1]) f = &filterBuilder{} - f.handleCriterion(stringCriterionHandler(&models.StringCriterionInput{ + f.handleCriterion(testCtx, stringCriterionHandler(&models.StringCriterionInput{ Modifier: models.CriterionModifierExcludes, Value: quotedValue, }, column)) @@ -523,7 +526,7 @@ func TestStringCriterionHandlerEquals(t *testing.T) { const value1 = "two words" f := &filterBuilder{} - f.handleCriterion(stringCriterionHandler(&models.StringCriterionInput{ + f.handleCriterion(testCtx, stringCriterionHandler(&models.StringCriterionInput{ Modifier: models.CriterionModifierEquals, Value: value1, }, column)) @@ -541,7 +544,7 @@ func TestStringCriterionHandlerNotEquals(t *testing.T) { const value1 = "two words" f := &filterBuilder{} - f.handleCriterion(stringCriterionHandler(&models.StringCriterionInput{ + f.handleCriterion(testCtx, stringCriterionHandler(&models.StringCriterionInput{ Modifier: models.CriterionModifierNotEquals, Value: value1, }, column)) @@ -560,7 +563,7 @@ func TestStringCriterionHandlerMatchesRegex(t *testing.T) { const invalidValue = "*two words" f := &filterBuilder{} - f.handleCriterion(stringCriterionHandler(&models.StringCriterionInput{ + f.handleCriterion(testCtx, stringCriterionHandler(&models.StringCriterionInput{ Modifier: models.CriterionModifierMatchesRegex, Value: validValue, }, column)) @@ -572,7 +575,7 @@ func TestStringCriterionHandlerMatchesRegex(t *testing.T) { // ensure invalid regex sets error state f = &filterBuilder{} - f.handleCriterion(stringCriterionHandler(&models.StringCriterionInput{ + f.handleCriterion(testCtx, stringCriterionHandler(&models.StringCriterionInput{ Modifier: models.CriterionModifierMatchesRegex, Value: invalidValue, }, column)) @@ -588,7 +591,7 @@ func TestStringCriterionHandlerNotMatchesRegex(t *testing.T) { const invalidValue = "*two words" f := &filterBuilder{} - f.handleCriterion(stringCriterionHandler(&models.StringCriterionInput{ + f.handleCriterion(testCtx, stringCriterionHandler(&models.StringCriterionInput{ Modifier: models.CriterionModifierNotMatchesRegex, Value: validValue, }, column)) @@ -600,7 +603,7 @@ func TestStringCriterionHandlerNotMatchesRegex(t *testing.T) { // ensure invalid regex sets error state f = &filterBuilder{} - f.handleCriterion(stringCriterionHandler(&models.StringCriterionInput{ + f.handleCriterion(testCtx, stringCriterionHandler(&models.StringCriterionInput{ Modifier: models.CriterionModifierNotMatchesRegex, Value: invalidValue, }, column)) @@ -614,7 +617,7 @@ func TestStringCriterionHandlerIsNull(t *testing.T) { const column = "column" f := &filterBuilder{} - f.handleCriterion(stringCriterionHandler(&models.StringCriterionInput{ + f.handleCriterion(testCtx, stringCriterionHandler(&models.StringCriterionInput{ Modifier: models.CriterionModifierIsNull, }, column)) @@ -629,7 +632,7 @@ func TestStringCriterionHandlerNotNull(t *testing.T) { const column = "column" f := &filterBuilder{} - f.handleCriterion(stringCriterionHandler(&models.StringCriterionInput{ + f.handleCriterion(testCtx, 
stringCriterionHandler(&models.StringCriterionInput{ Modifier: models.CriterionModifierNotNull, }, column)) diff --git a/pkg/sqlite/fingerprint.go b/pkg/sqlite/fingerprint.go new file mode 100644 index 000000000..0f7c36d12 --- /dev/null +++ b/pkg/sqlite/fingerprint.go @@ -0,0 +1,81 @@ +package sqlite + +import ( + "context" + "fmt" + + "github.com/doug-martin/goqu/v9" + "github.com/doug-martin/goqu/v9/exp" + "github.com/stashapp/stash/pkg/file" + "gopkg.in/guregu/null.v4" +) + +const ( + fingerprintTable = "files_fingerprints" +) + +type fingerprintQueryRow struct { + Type null.String `db:"fingerprint_type"` + Fingerprint interface{} `db:"fingerprint"` +} + +func (r fingerprintQueryRow) valid() bool { + return r.Type.Valid +} + +func (r *fingerprintQueryRow) resolve() file.Fingerprint { + return file.Fingerprint{ + Type: r.Type.String, + Fingerprint: r.Fingerprint, + } +} + +type fingerprintQueryBuilder struct { + repository + + tableMgr *table +} + +var FingerprintReaderWriter = &fingerprintQueryBuilder{ + repository: repository{ + tableName: fingerprintTable, + idColumn: fileIDColumn, + }, + + tableMgr: fingerprintTableMgr, +} + +func (qb *fingerprintQueryBuilder) insert(ctx context.Context, fileID file.ID, f file.Fingerprint) error { + table := qb.table() + q := dialect.Insert(table).Cols(fileIDColumn, "type", "fingerprint").Vals( + goqu.Vals{fileID, f.Type, f.Fingerprint}, + ) + _, err := exec(ctx, q) + if err != nil { + return fmt.Errorf("inserting into %s: %w", table.GetTable(), err) + } + + return nil +} + +func (qb *fingerprintQueryBuilder) insertJoins(ctx context.Context, fileID file.ID, f []file.Fingerprint) error { + for _, ff := range f { + if err := qb.insert(ctx, fileID, ff); err != nil { + return err + } + } + + return nil +} + +func (qb *fingerprintQueryBuilder) replaceJoins(ctx context.Context, fileID file.ID, f []file.Fingerprint) error { + if err := qb.destroy(ctx, []int{int(fileID)}); err != nil { + return err + } + + return qb.insertJoins(ctx, fileID, f) +} + +func (qb *fingerprintQueryBuilder) table() exp.IdentifierExpression { + return qb.tableMgr.table +} diff --git a/pkg/sqlite/folder.go b/pkg/sqlite/folder.go new file mode 100644 index 000000000..f9333c782 --- /dev/null +++ b/pkg/sqlite/folder.go @@ -0,0 +1,312 @@ +package sqlite + +import ( + "context" + "database/sql" + "errors" + "fmt" + "path/filepath" + "time" + + "github.com/doug-martin/goqu/v9" + "github.com/doug-martin/goqu/v9/exp" + "github.com/jmoiron/sqlx" + "github.com/stashapp/stash/pkg/file" + "gopkg.in/guregu/null.v4" +) + +const folderTable = "folders" + +type folderRow struct { + ID file.FolderID `db:"id" goqu:"skipinsert"` + Path string `db:"path"` + ZipFileID null.Int `db:"zip_file_id"` + ParentFolderID null.Int `db:"parent_folder_id"` + ModTime time.Time `db:"mod_time"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} + +func (r *folderRow) fromFolder(o file.Folder) { + r.ID = o.ID + r.Path = o.Path + r.ZipFileID = nullIntFromFileIDPtr(o.ZipFileID) + r.ParentFolderID = nullIntFromFolderIDPtr(o.ParentFolderID) + r.ModTime = o.ModTime + r.CreatedAt = o.CreatedAt + r.UpdatedAt = o.UpdatedAt +} + +type folderQueryRow struct { + folderRow + + ZipBasename null.String `db:"zip_basename"` + ZipFolderPath null.String `db:"zip_folder_path"` +} + +func (r *folderQueryRow) resolve() *file.Folder { + ret := &file.Folder{ + ID: r.ID, + DirEntry: file.DirEntry{ + ZipFileID: nullIntFileIDPtr(r.ZipFileID), + ModTime: r.ModTime, + }, + Path: string(r.Path), + ParentFolderID: 
nullIntFolderIDPtr(r.ParentFolderID), + CreatedAt: r.CreatedAt, + UpdatedAt: r.UpdatedAt, + } + + if ret.ZipFileID != nil && r.ZipFolderPath.Valid && r.ZipBasename.Valid { + ret.ZipFile = &file.BaseFile{ + ID: *ret.ZipFileID, + Path: filepath.Join(r.ZipFolderPath.String, r.ZipBasename.String), + Basename: r.ZipBasename.String, + } + } + + return ret +} + +type folderQueryRows []folderQueryRow + +func (r folderQueryRows) resolve() []*file.Folder { + var ret []*file.Folder + + for _, row := range r { + f := row.resolve() + ret = append(ret, f) + } + + return ret +} + +type FolderStore struct { + repository + + tableMgr *table +} + +func NewFolderStore() *FolderStore { + return &FolderStore{ + repository: repository{ + tableName: folderTable, + idColumn: idColumn, + }, + + tableMgr: folderTableMgr, + } +} + +func (qb *FolderStore) Create(ctx context.Context, f *file.Folder) error { + var r folderRow + r.fromFolder(*f) + + id, err := qb.tableMgr.insertID(ctx, r) + if err != nil { + return err + } + + // only assign id once we are successful + f.ID = file.FolderID(id) + + return nil +} + +func (qb *FolderStore) Update(ctx context.Context, updatedObject *file.Folder) error { + var r folderRow + r.fromFolder(*updatedObject) + + if err := qb.tableMgr.updateByID(ctx, updatedObject.ID, r); err != nil { + return err + } + + return nil +} + +func (qb *FolderStore) Destroy(ctx context.Context, id file.FolderID) error { + return qb.tableMgr.destroyExisting(ctx, []int{int(id)}) +} + +func (qb *FolderStore) table() exp.IdentifierExpression { + return qb.tableMgr.table +} + +func (qb *FolderStore) selectDataset() *goqu.SelectDataset { + table := qb.table() + fileTable := fileTableMgr.table + + zipFileTable := fileTable.As("zip_files") + zipFolderTable := table.As("zip_files_folders") + + cols := []interface{}{ + table.Col("id"), + table.Col("path"), + table.Col("zip_file_id"), + table.Col("parent_folder_id"), + table.Col("mod_time"), + table.Col("created_at"), + table.Col("updated_at"), + zipFileTable.Col("basename").As("zip_basename"), + zipFolderTable.Col("path").As("zip_folder_path"), + } + + ret := dialect.From(table).Select(cols...) 
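+ // the zip file and its folder are left-joined so that folders that are not inside a zip file still resolve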
+ + return ret.LeftJoin( + zipFileTable, + goqu.On(table.Col("zip_file_id").Eq(zipFileTable.Col("id"))), + ).LeftJoin( + zipFolderTable, + goqu.On(zipFileTable.Col("parent_folder_id").Eq(zipFolderTable.Col(idColumn))), + ) +} + +func (qb *FolderStore) countDataset() *goqu.SelectDataset { + table := qb.table() + fileTable := fileTableMgr.table + + zipFileTable := fileTable.As("zip_files") + zipFolderTable := table.As("zip_files_folders") + + ret := dialect.From(table).Select(goqu.COUNT(goqu.DISTINCT(table.Col("id")))) + + return ret.LeftJoin( + zipFileTable, + goqu.On(table.Col("zip_file_id").Eq(zipFileTable.Col("id"))), + ).LeftJoin( + zipFolderTable, + goqu.On(zipFileTable.Col("parent_folder_id").Eq(zipFolderTable.Col(idColumn))), + ) +} + +func (qb *FolderStore) get(ctx context.Context, q *goqu.SelectDataset) (*file.Folder, error) { + ret, err := qb.getMany(ctx, q) + if err != nil { + return nil, err + } + + if len(ret) == 0 { + return nil, sql.ErrNoRows + } + + return ret[0], nil +} + +func (qb *FolderStore) getMany(ctx context.Context, q *goqu.SelectDataset) ([]*file.Folder, error) { + const single = false + var rows folderQueryRows + if err := queryFunc(ctx, q, single, func(r *sqlx.Rows) error { + var f folderQueryRow + if err := r.StructScan(&f); err != nil { + return err + } + + rows = append(rows, f) + return nil + }); err != nil { + return nil, err + } + + return rows.resolve(), nil +} + +func (qb *FolderStore) Find(ctx context.Context, id file.FolderID) (*file.Folder, error) { + q := qb.selectDataset().Where(qb.tableMgr.byID(id)) + + ret, err := qb.get(ctx, q) + if err != nil { + return nil, fmt.Errorf("getting folder by id %d: %w", id, err) + } + + return ret, nil +} + +func (qb *FolderStore) FindByPath(ctx context.Context, p string) (*file.Folder, error) { + q := qb.selectDataset().Prepared(true).Where(qb.table().Col("path").Eq(p)) + + ret, err := qb.get(ctx, q) + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("getting folder by path %s: %w", p, err) + } + + return ret, nil +} + +func (qb *FolderStore) FindByParentFolderID(ctx context.Context, parentFolderID file.FolderID) ([]*file.Folder, error) { + q := qb.selectDataset().Where(qb.table().Col("parent_folder_id").Eq(int(parentFolderID))) + + ret, err := qb.getMany(ctx, q) + if err != nil { + return nil, fmt.Errorf("getting folders by parent folder id %d: %w", parentFolderID, err) + } + + return ret, nil +} + +func (qb *FolderStore) allInPaths(q *goqu.SelectDataset, p []string) *goqu.SelectDataset { + table := qb.table() + + var conds []exp.Expression + for _, pp := range p { + ppWildcard := pp + string(filepath.Separator) + "%" + + conds = append(conds, table.Col("path").Eq(pp), table.Col("path").Like(ppWildcard)) + } + + return q.Where( + goqu.Or(conds...), + ) +} + +// FindAllInPaths returns all folders that match, or are contained within, any of the given paths. +// Returns all matching folders if limit is < 0. +// Returns all folders if p is empty. +func (qb *FolderStore) FindAllInPaths(ctx context.Context, p []string, limit, offset int) ([]*file.Folder, error) { + q := qb.selectDataset().Prepared(true) + q = qb.allInPaths(q, p) + + if limit > -1 { + q = q.Limit(uint(limit)) + } + + q = q.Offset(uint(offset)) + + ret, err := qb.getMany(ctx, q) + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("getting folders in path %s: %w", p, err) + } + + return ret, nil +} + +// CountAllInPaths returns a count of all folders that are within any of the given paths. 
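+// Matching follows the same exact-or-prefix path semantics as FindAllInPaths.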
+// Returns count of all folders if p is empty. +func (qb *FolderStore) CountAllInPaths(ctx context.Context, p []string) (int, error) { + q := qb.countDataset().Prepared(true) + q = qb.allInPaths(q, p) + + return count(ctx, q) +} + +// func (qb *FolderStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]*file.Folder, error) { +// table := qb.table() + +// q := qb.selectDataset().Prepared(true).Where( +// table.Col(idColumn).Eq( +// sq, +// ), +// ) + +// return qb.getMany(ctx, q) +// } + +func (qb *FolderStore) FindByZipFileID(ctx context.Context, zipFileID file.ID) ([]*file.Folder, error) { + table := qb.table() + + q := qb.selectDataset().Prepared(true).Where( + table.Col("zip_file_id").Eq(zipFileID), + ) + + return qb.getMany(ctx, q) +} diff --git a/pkg/sqlite/folder_test.go b/pkg/sqlite/folder_test.go new file mode 100644 index 000000000..5596205c8 --- /dev/null +++ b/pkg/sqlite/folder_test.go @@ -0,0 +1,241 @@ +//go:build integration +// +build integration + +package sqlite_test + +import ( + "context" + "reflect" + "testing" + "time" + + "github.com/stashapp/stash/pkg/file" + "github.com/stretchr/testify/assert" +) + +var ( + invalidFolderID = file.FolderID(invalidID) + invalidFileID = file.ID(invalidID) +) + +func Test_FolderStore_Create(t *testing.T) { + var ( + path = "path" + fileModTime = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + createdAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + updatedAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + ) + + tests := []struct { + name string + newObject file.Folder + wantErr bool + }{ + { + "full", + file.Folder{ + DirEntry: file.DirEntry{ + ZipFileID: &fileIDs[fileIdxZip], + ZipFile: makeZipFileWithID(fileIdxZip), + ModTime: fileModTime, + }, + Path: path, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + false, + }, + { + "invalid parent folder id", + file.Folder{ + Path: path, + ParentFolderID: &invalidFolderID, + }, + true, + }, + { + "invalid zip file id", + file.Folder{ + DirEntry: file.DirEntry{ + ZipFileID: &invalidFileID, + }, + Path: path, + }, + true, + }, + } + + qb := db.Folder + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + + s := tt.newObject + if err := qb.Create(ctx, &s); (err != nil) != tt.wantErr { + t.Errorf("FolderStore.Create() error = %v, wantErr = %v", err, tt.wantErr) + } + + if tt.wantErr { + assert.Zero(s.ID) + return + } + + assert.NotZero(s.ID) + + copy := tt.newObject + copy.ID = s.ID + + assert.Equal(copy, s) + + // ensure can find the folder + found, err := qb.FindByPath(ctx, path) + if err != nil { + t.Errorf("FolderStore.Find() error = %v", err) + } + + assert.Equal(copy, *found) + }) + } +} + +func Test_FolderStore_Update(t *testing.T) { + var ( + path = "path" + fileModTime = time.Date(2000, 1, 2, 3, 4, 5, 6, time.UTC) + createdAt = time.Date(2001, 1, 2, 3, 4, 5, 6, time.UTC) + updatedAt = time.Date(2002, 1, 2, 3, 4, 5, 6, time.UTC) + ) + + tests := []struct { + name string + updatedObject *file.Folder + wantErr bool + }{ + { + "full", + &file.Folder{ + ID: folderIDs[folderIdxWithParentFolder], + DirEntry: file.DirEntry{ + ZipFileID: &fileIDs[fileIdxZip], + ZipFile: makeZipFileWithID(fileIdxZip), + ModTime: fileModTime, + }, + Path: path, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + false, + }, + { + "clear zip", + &file.Folder{ + ID: folderIDs[folderIdxInZip], + Path: path, + }, + false, + }, + { + "clear folder", + &file.Folder{ + ID: folderIDs[folderIdxWithParentFolder], + Path: 
path, + }, + false, + }, + { + "invalid parent folder id", + &file.Folder{ + ID: folderIDs[folderIdxWithParentFolder], + Path: path, + ParentFolderID: &invalidFolderID, + }, + true, + }, + { + "invalid zip file id", + &file.Folder{ + ID: folderIDs[folderIdxWithParentFolder], + DirEntry: file.DirEntry{ + ZipFileID: &invalidFileID, + }, + Path: path, + }, + true, + }, + } + + qb := db.Folder + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + + copy := *tt.updatedObject + + if err := qb.Update(ctx, tt.updatedObject); (err != nil) != tt.wantErr { + t.Errorf("FolderStore.Update() error = %v, wantErr %v", err, tt.wantErr) + } + + if tt.wantErr { + return + } + + s, err := qb.FindByPath(ctx, path) + if err != nil { + t.Errorf("FolderStore.Find() error = %v", err) + } + + assert.Equal(copy, *s) + + return + }) + } +} + +func makeFolderWithID(index int) *file.Folder { + ret := makeFolder(index) + ret.ID = folderIDs[index] + + return &ret +} + +func Test_FolderStore_FindByPath(t *testing.T) { + getPath := func(index int) string { + return folderPaths[index] + } + + tests := []struct { + name string + path string + want *file.Folder + wantErr bool + }{ + { + "valid", + getPath(folderIdxWithFiles), + makeFolderWithID(folderIdxWithFiles), + false, + }, + { + "invalid", + "invalid path", + nil, + false, + }, + } + + qb := db.Folder + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + got, err := qb.FindByPath(ctx, tt.path) + if (err != nil) != tt.wantErr { + t.Errorf("FolderStore.FindByPath() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("FolderStore.FindByPath() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/database/functions.go b/pkg/sqlite/functions.go similarity index 96% rename from pkg/database/functions.go rename to pkg/sqlite/functions.go index 2971f1e22..29e93aa22 100644 --- a/pkg/database/functions.go +++ b/pkg/sqlite/functions.go @@ -1,4 +1,4 @@ -package database +package sqlite import ( "strconv" diff --git a/pkg/sqlite/gallery.go b/pkg/sqlite/gallery.go index b7f9276ac..995f37cb8 100644 --- a/pkg/sqlite/gallery.go +++ b/pkg/sqlite/gallery.go @@ -1,165 +1,587 @@ package sqlite import ( + "context" "database/sql" "errors" "fmt" + "path/filepath" + "regexp" + "time" + "github.com/doug-martin/goqu/v9" + "github.com/doug-martin/goqu/v9/exp" + "github.com/jmoiron/sqlx" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sliceutil/intslice" + "gopkg.in/guregu/null.v4" + "gopkg.in/guregu/null.v4/zero" ) -const galleryTable = "galleries" +const ( + galleryTable = "galleries" -const performersGalleriesTable = "performers_galleries" -const galleriesTagsTable = "galleries_tags" -const galleriesImagesTable = "galleries_images" -const galleriesScenesTable = "scenes_galleries" -const galleryIDColumn = "gallery_id" + galleriesFilesTable = "galleries_files" + performersGalleriesTable = "performers_galleries" + galleriesTagsTable = "galleries_tags" + galleriesImagesTable = "galleries_images" + galleriesScenesTable = "scenes_galleries" + galleryIDColumn = "gallery_id" +) -type galleryQueryBuilder struct { - repository +type galleryRow struct { + ID int `db:"id" goqu:"skipinsert"` + Title zero.String `db:"title"` + URL zero.String `db:"url"` + Date models.SQLiteDate `db:"date"` + Details zero.String `db:"details"` + Rating null.Int 
`db:"rating"` + Organized bool `db:"organized"` + StudioID null.Int `db:"studio_id,omitempty"` + FolderID null.Int `db:"folder_id,omitempty"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` } -func NewGalleryReaderWriter(tx dbi) *galleryQueryBuilder { - return &galleryQueryBuilder{ - repository{ - tx: tx, +func (r *galleryRow) fromGallery(o models.Gallery) { + r.ID = o.ID + r.Title = zero.StringFrom(o.Title) + r.URL = zero.StringFrom(o.URL) + if o.Date != nil { + _ = r.Date.Scan(o.Date.Time) + } + r.Details = zero.StringFrom(o.Details) + r.Rating = intFromPtr(o.Rating) + r.Organized = o.Organized + r.StudioID = intFromPtr(o.StudioID) + r.FolderID = nullIntFromFolderIDPtr(o.FolderID) + r.CreatedAt = o.CreatedAt + r.UpdatedAt = o.UpdatedAt +} + +type galleryQueryRow struct { + galleryRow + FolderPath zero.String `db:"folder_path"` + PrimaryFileID null.Int `db:"primary_file_id"` + PrimaryFileFolderPath zero.String `db:"primary_file_folder_path"` + PrimaryFileBasename zero.String `db:"primary_file_basename"` + PrimaryFileChecksum zero.String `db:"primary_file_checksum"` +} + +func (r *galleryQueryRow) resolve() *models.Gallery { + ret := &models.Gallery{ + ID: r.ID, + Title: r.Title.String, + URL: r.URL.String, + Date: r.Date.DatePtr(), + Details: r.Details.String, + Rating: nullIntPtr(r.Rating), + Organized: r.Organized, + StudioID: nullIntPtr(r.StudioID), + FolderID: nullIntFolderIDPtr(r.FolderID), + PrimaryFileID: nullIntFileIDPtr(r.PrimaryFileID), + CreatedAt: r.CreatedAt, + UpdatedAt: r.UpdatedAt, + } + + if r.PrimaryFileFolderPath.Valid && r.PrimaryFileBasename.Valid { + ret.Path = filepath.Join(r.PrimaryFileFolderPath.String, r.PrimaryFileBasename.String) + } else if r.FolderPath.Valid { + ret.Path = r.FolderPath.String + } + + return ret +} + +type galleryRowRecord struct { + updateRecord +} + +func (r *galleryRowRecord) fromPartial(o models.GalleryPartial) { + r.setNullString("title", o.Title) + r.setNullString("url", o.URL) + r.setSQLiteDate("date", o.Date) + r.setNullString("details", o.Details) + r.setNullInt("rating", o.Rating) + r.setBool("organized", o.Organized) + r.setNullInt("studio_id", o.StudioID) + r.setTime("created_at", o.CreatedAt) + r.setTime("updated_at", o.UpdatedAt) +} + +type GalleryStore struct { + repository + + tableMgr *table + + fileStore *FileStore + folderStore *FolderStore +} + +func NewGalleryStore(fileStore *FileStore, folderStore *FolderStore) *GalleryStore { + return &GalleryStore{ + repository: repository{ tableName: galleryTable, idColumn: idColumn, }, + tableMgr: galleryTableMgr, + fileStore: fileStore, + folderStore: folderStore, } } -func (qb *galleryQueryBuilder) Create(newObject models.Gallery) (*models.Gallery, error) { - var ret models.Gallery - if err := qb.insertObject(newObject, &ret); err != nil { - return nil, err +func (qb *GalleryStore) table() exp.IdentifierExpression { + return qb.tableMgr.table +} + +func (qb *GalleryStore) Create(ctx context.Context, newObject *models.Gallery, fileIDs []file.ID) error { + var r galleryRow + r.fromGallery(*newObject) + + id, err := qb.tableMgr.insertID(ctx, r) + if err != nil { + return err } - return &ret, nil -} - -func (qb *galleryQueryBuilder) Update(updatedObject models.Gallery) (*models.Gallery, error) { - const partial = false - if err := qb.update(updatedObject.ID, updatedObject, partial); err != nil { - return nil, err - } - - return qb.Find(updatedObject.ID) -} - -func (qb *galleryQueryBuilder) UpdatePartial(updatedObject models.GalleryPartial) 
(*models.Gallery, error) { - const partial = true - if err := qb.update(updatedObject.ID, updatedObject, partial); err != nil { - return nil, err - } - - return qb.Find(updatedObject.ID) -} - -func (qb *galleryQueryBuilder) UpdateChecksum(id int, checksum string) error { - return qb.updateMap(id, map[string]interface{}{ - "checksum": checksum, - }) -} - -func (qb *galleryQueryBuilder) UpdateFileModTime(id int, modTime models.NullSQLiteTimestamp) error { - return qb.updateMap(id, map[string]interface{}{ - "file_mod_time": modTime, - }) -} - -func (qb *galleryQueryBuilder) Destroy(id int) error { - return qb.destroyExisting([]int{id}) -} - -func (qb *galleryQueryBuilder) Find(id int) (*models.Gallery, error) { - var ret models.Gallery - if err := qb.get(id, &ret); err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, nil + if len(fileIDs) > 0 { + const firstPrimary = true + if err := galleriesFilesTableMgr.insertJoins(ctx, id, firstPrimary, fileIDs); err != nil { + return err } - return nil, err } - return &ret, nil + + if newObject.PerformerIDs.Loaded() { + if err := galleriesPerformersTableMgr.insertJoins(ctx, id, newObject.PerformerIDs.List()); err != nil { + return err + } + } + if newObject.TagIDs.Loaded() { + if err := galleriesTagsTableMgr.insertJoins(ctx, id, newObject.TagIDs.List()); err != nil { + return err + } + } + if newObject.SceneIDs.Loaded() { + if err := galleriesScenesTableMgr.insertJoins(ctx, id, newObject.SceneIDs.List()); err != nil { + return err + } + } + + updated, err := qb.Find(ctx, id) + if err != nil { + return fmt.Errorf("finding after create: %w", err) + } + + *newObject = *updated + + return nil } -func (qb *galleryQueryBuilder) FindMany(ids []int) ([]*models.Gallery, error) { - var galleries []*models.Gallery - for _, id := range ids { - gallery, err := qb.Find(id) - if err != nil { +func (qb *GalleryStore) Update(ctx context.Context, updatedObject *models.Gallery) error { + var r galleryRow + r.fromGallery(*updatedObject) + + if err := qb.tableMgr.updateByID(ctx, updatedObject.ID, r); err != nil { + return err + } + + if updatedObject.PerformerIDs.Loaded() { + if err := galleriesPerformersTableMgr.replaceJoins(ctx, updatedObject.ID, updatedObject.PerformerIDs.List()); err != nil { + return err + } + } + if updatedObject.TagIDs.Loaded() { + if err := galleriesTagsTableMgr.replaceJoins(ctx, updatedObject.ID, updatedObject.TagIDs.List()); err != nil { + return err + } + } + if updatedObject.SceneIDs.Loaded() { + if err := galleriesScenesTableMgr.replaceJoins(ctx, updatedObject.ID, updatedObject.SceneIDs.List()); err != nil { + return err + } + } + + if updatedObject.Files.Loaded() { + fileIDs := make([]file.ID, len(updatedObject.Files.List())) + for i, f := range updatedObject.Files.List() { + fileIDs[i] = f.Base().ID + } + + if err := galleriesFilesTableMgr.replaceJoins(ctx, updatedObject.ID, fileIDs); err != nil { + return err + } + } + + return nil +} + +func (qb *GalleryStore) UpdatePartial(ctx context.Context, id int, partial models.GalleryPartial) (*models.Gallery, error) { + r := galleryRowRecord{ + updateRecord{ + Record: make(exp.Record), + }, + } + + r.fromPartial(partial) + + if len(r.Record) > 0 { + if err := qb.tableMgr.updateByID(ctx, id, r.Record); err != nil { return nil, err } + } - if gallery == nil { - return nil, fmt.Errorf("gallery with id %d not found", id) + if partial.PerformerIDs != nil { + if err := galleriesPerformersTableMgr.modifyJoins(ctx, id, partial.PerformerIDs.IDs, partial.PerformerIDs.Mode); err != nil { + return 
nil, err + } + } + if partial.TagIDs != nil { + if err := galleriesTagsTableMgr.modifyJoins(ctx, id, partial.TagIDs.IDs, partial.TagIDs.Mode); err != nil { + return nil, err + } + } + if partial.SceneIDs != nil { + if err := galleriesScenesTableMgr.modifyJoins(ctx, id, partial.SceneIDs.IDs, partial.SceneIDs.Mode); err != nil { + return nil, err + } + } + + if partial.PrimaryFileID != nil { + if err := galleriesFilesTableMgr.setPrimary(ctx, id, *partial.PrimaryFileID); err != nil { + return nil, err + } + } + + return qb.Find(ctx, id) +} + +func (qb *GalleryStore) Destroy(ctx context.Context, id int) error { + return qb.tableMgr.destroyExisting(ctx, []int{id}) +} + +func (qb *GalleryStore) selectDataset() *goqu.SelectDataset { + table := qb.table() + files := fileTableMgr.table + folders := folderTableMgr.table + galleryFolder := folderTableMgr.table.As("gallery_folder") + + return dialect.From(table).LeftJoin( + galleriesFilesJoinTable, + goqu.On( + galleriesFilesJoinTable.Col(galleryIDColumn).Eq(table.Col(idColumn)), + galleriesFilesJoinTable.Col("primary").Eq(1), + ), + ).LeftJoin( + files, + goqu.On(files.Col(idColumn).Eq(galleriesFilesJoinTable.Col(fileIDColumn))), + ).LeftJoin( + folders, + goqu.On(folders.Col(idColumn).Eq(files.Col("parent_folder_id"))), + ).LeftJoin( + galleryFolder, + goqu.On(galleryFolder.Col(idColumn).Eq(table.Col("folder_id"))), + ).Select( + qb.table().All(), + galleriesFilesJoinTable.Col(fileIDColumn).As("primary_file_id"), + folders.Col("path").As("primary_file_folder_path"), + files.Col("basename").As("primary_file_basename"), + galleryFolder.Col("path").As("folder_path"), + ) +} + +func (qb *GalleryStore) get(ctx context.Context, q *goqu.SelectDataset) (*models.Gallery, error) { + ret, err := qb.getMany(ctx, q) + if err != nil { + return nil, err + } + + if len(ret) == 0 { + return nil, sql.ErrNoRows + } + + return ret[0], nil +} + +func (qb *GalleryStore) getMany(ctx context.Context, q *goqu.SelectDataset) ([]*models.Gallery, error) { + const single = false + var ret []*models.Gallery + var lastID int + if err := queryFunc(ctx, q, single, func(r *sqlx.Rows) error { + var f galleryQueryRow + if err := r.StructScan(&f); err != nil { + return err } - galleries = append(galleries, gallery) + s := f.resolve() + + if s.ID == lastID { + return fmt.Errorf("internal error: multiple rows returned for single gallery id %d", s.ID) + } + lastID = s.ID + + ret = append(ret, s) + return nil + }); err != nil { + return nil, err + } + + return ret, nil +} + +func (qb *GalleryStore) GetFiles(ctx context.Context, id int) ([]file.File, error) { + fileIDs, err := qb.filesRepository().get(ctx, id) + if err != nil { + return nil, err + } + + // use fileStore to load files + files, err := qb.fileStore.Find(ctx, fileIDs...) 
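+	// Find resolves the joined file IDs into their full file records; the
+	// result is copied into a fresh slice below before being returned.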
+ if err != nil { + return nil, err + } + + ret := make([]file.File, len(files)) + copy(ret, files) + + return ret, nil +} + +func (qb *GalleryStore) GetManyFileIDs(ctx context.Context, ids []int) ([][]file.ID, error) { + const primaryOnly = false + return qb.filesRepository().getMany(ctx, ids, primaryOnly) +} + +func (qb *GalleryStore) Find(ctx context.Context, id int) (*models.Gallery, error) { + q := qb.selectDataset().Where(qb.tableMgr.byID(id)) + + ret, err := qb.get(ctx, q) + if err != nil { + return nil, fmt.Errorf("getting gallery by id %d: %w", id, err) + } + + return ret, nil +} + +func (qb *GalleryStore) FindMany(ctx context.Context, ids []int) ([]*models.Gallery, error) { + q := qb.selectDataset().Prepared(true).Where(qb.table().Col(idColumn).In(ids)) + unsorted, err := qb.getMany(ctx, q) + if err != nil { + return nil, err + } + + galleries := make([]*models.Gallery, len(ids)) + + for _, s := range unsorted { + i := intslice.IntIndex(ids, s.ID) + galleries[i] = s + } + + for i := range galleries { + if galleries[i] == nil { + return nil, fmt.Errorf("gallery with id %d not found", ids[i]) + } } return galleries, nil } -func (qb *galleryQueryBuilder) FindByChecksum(checksum string) (*models.Gallery, error) { - query := "SELECT * FROM galleries WHERE checksum = ? LIMIT 1" - args := []interface{}{checksum} - return qb.queryGallery(query, args) +func (qb *GalleryStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]*models.Gallery, error) { + table := qb.table() + + q := qb.selectDataset().Prepared(true).Where( + table.Col(idColumn).Eq( + sq, + ), + ) + + return qb.getMany(ctx, q) } -func (qb *galleryQueryBuilder) FindByChecksums(checksums []string) ([]*models.Gallery, error) { - query := "SELECT * FROM galleries WHERE checksum IN " + getInBinding(len(checksums)) - var args []interface{} - for _, checksum := range checksums { - args = append(args, checksum) +func (qb *GalleryStore) FindByFileID(ctx context.Context, fileID file.ID) ([]*models.Gallery, error) { + sq := dialect.From(galleriesFilesJoinTable).Select(galleriesFilesJoinTable.Col(galleryIDColumn)).Where( + galleriesFilesJoinTable.Col(fileIDColumn).Eq(fileID), + ) + + ret, err := qb.findBySubquery(ctx, sq) + if err != nil { + return nil, fmt.Errorf("getting gallery by file id %d: %w", fileID, err) } - return qb.queryGalleries(query, args) + + return ret, nil } -func (qb *galleryQueryBuilder) FindByPath(path string) (*models.Gallery, error) { - query := "SELECT * FROM galleries WHERE path = ? LIMIT 1" - args := []interface{}{path} - return qb.queryGallery(query, args) +func (qb *GalleryStore) CountByFileID(ctx context.Context, fileID file.ID) (int, error) { + joinTable := galleriesFilesJoinTable + + q := dialect.Select(goqu.COUNT("*")).From(joinTable).Where(joinTable.Col(fileIDColumn).Eq(fileID)) + return count(ctx, q) } -func (qb *galleryQueryBuilder) FindBySceneID(sceneID int) ([]*models.Gallery, error) { - query := selectAll(galleryTable) + ` - LEFT JOIN scenes_galleries as scenes_join on scenes_join.gallery_id = galleries.id - WHERE scenes_join.scene_id = ? 
- GROUP BY galleries.id - ` - args := []interface{}{sceneID} - return qb.queryGalleries(query, args) +func (qb *GalleryStore) FindByFingerprints(ctx context.Context, fp []file.Fingerprint) ([]*models.Gallery, error) { + fingerprintTable := fingerprintTableMgr.table + + var ex []exp.Expression + + for _, v := range fp { + ex = append(ex, goqu.And( + fingerprintTable.Col("type").Eq(v.Type), + fingerprintTable.Col("fingerprint").Eq(v.Fingerprint), + )) + } + + sq := dialect.From(galleriesFilesJoinTable). + InnerJoin( + fingerprintTable, + goqu.On(fingerprintTable.Col(fileIDColumn).Eq(galleriesFilesJoinTable.Col(fileIDColumn))), + ). + Select(galleriesFilesJoinTable.Col(galleryIDColumn)).Where(goqu.Or(ex...)) + + ret, err := qb.findBySubquery(ctx, sq) + if err != nil { + return nil, fmt.Errorf("getting gallery by fingerprints: %w", err) + } + + return ret, nil } -func (qb *galleryQueryBuilder) FindByImageID(imageID int) ([]*models.Gallery, error) { - query := selectAll(galleryTable) + ` - INNER JOIN galleries_images as images_join on images_join.gallery_id = galleries.id - WHERE images_join.image_id = ? - GROUP BY galleries.id - ` - args := []interface{}{imageID} - return qb.queryGalleries(query, args) +func (qb *GalleryStore) FindByChecksum(ctx context.Context, checksum string) ([]*models.Gallery, error) { + return qb.FindByFingerprints(ctx, []file.Fingerprint{ + { + Type: file.FingerprintTypeMD5, + Fingerprint: checksum, + }, + }) } -func (qb *galleryQueryBuilder) CountByImageID(imageID int) (int, error) { - query := `SELECT image_id FROM galleries_images - WHERE image_id = ? - GROUP BY gallery_id` - args := []interface{}{imageID} - return qb.runCountQuery(qb.buildCountQuery(query), args) +func (qb *GalleryStore) FindByChecksums(ctx context.Context, checksums []string) ([]*models.Gallery, error) { + fingerprints := make([]file.Fingerprint, len(checksums)) + + for i, c := range checksums { + fingerprints[i] = file.Fingerprint{ + Type: file.FingerprintTypeMD5, + Fingerprint: c, + } + } + return qb.FindByFingerprints(ctx, fingerprints) } -func (qb *galleryQueryBuilder) Count() (int, error) { - return qb.runCountQuery(qb.buildCountQuery("SELECT galleries.id FROM galleries"), nil) +func (qb *GalleryStore) FindByPath(ctx context.Context, p string) ([]*models.Gallery, error) { + table := qb.table() + filesTable := fileTableMgr.table + fileFoldersTable := folderTableMgr.table.As("file_folders") + foldersTable := folderTableMgr.table + + basename := filepath.Base(p) + dir := filepath.Dir(p) + + sq := dialect.From(table).LeftJoin( + galleriesFilesJoinTable, + goqu.On(galleriesFilesJoinTable.Col(galleryIDColumn).Eq(table.Col(idColumn))), + ).LeftJoin( + filesTable, + goqu.On(filesTable.Col(idColumn).Eq(galleriesFilesJoinTable.Col(fileIDColumn))), + ).LeftJoin( + fileFoldersTable, + goqu.On(fileFoldersTable.Col(idColumn).Eq(filesTable.Col("parent_folder_id"))), + ).LeftJoin( + foldersTable, + goqu.On(foldersTable.Col(idColumn).Eq(table.Col("folder_id"))), + ).Select(table.Col(idColumn)).Where( + goqu.Or( + goqu.And( + fileFoldersTable.Col("path").Eq(dir), + filesTable.Col("basename").Eq(basename), + ), + foldersTable.Col("path").Eq(p), + ), + ) + + ret, err := qb.findBySubquery(ctx, sq) + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("getting gallery by path %s: %w", p, err) + } + + return ret, nil } -func (qb *galleryQueryBuilder) All() ([]*models.Gallery, error) { - return qb.queryGalleries(selectAll("galleries")+qb.getGallerySort(nil), nil) +func (qb *GalleryStore) 
FindByFolderID(ctx context.Context, folderID file.FolderID) ([]*models.Gallery, error) { + table := qb.table() + + sq := dialect.From(table).Select(table.Col(idColumn)).Where( + table.Col("folder_id").Eq(folderID), + ) + + ret, err := qb.findBySubquery(ctx, sq) + if err != nil { + return nil, fmt.Errorf("getting galleries for folder %d: %w", folderID, err) + } + + return ret, nil } -func (qb *galleryQueryBuilder) validateFilter(galleryFilter *models.GalleryFilterType) error { +func (qb *GalleryStore) FindBySceneID(ctx context.Context, sceneID int) ([]*models.Gallery, error) { + sq := dialect.From(galleriesScenesJoinTable).Select(galleriesScenesJoinTable.Col(galleryIDColumn)).Where( + galleriesScenesJoinTable.Col(sceneIDColumn).Eq(sceneID), + ) + + ret, err := qb.findBySubquery(ctx, sq) + if err != nil { + return nil, fmt.Errorf("getting galleries for scene %d: %w", sceneID, err) + } + + return ret, nil +} + +func (qb *GalleryStore) FindByImageID(ctx context.Context, imageID int) ([]*models.Gallery, error) { + sq := dialect.From(galleriesImagesJoinTable).Select(galleriesImagesJoinTable.Col(galleryIDColumn)).Where( + galleriesImagesJoinTable.Col(imageIDColumn).Eq(imageID), + ) + + ret, err := qb.findBySubquery(ctx, sq) + if err != nil { + return nil, fmt.Errorf("getting galleries for image %d: %w", imageID, err) + } + + return ret, nil +} + +func (qb *GalleryStore) CountByImageID(ctx context.Context, imageID int) (int, error) { + joinTable := galleriesImagesJoinTable + + q := dialect.Select(goqu.COUNT("*")).From(joinTable).Where(joinTable.Col(imageIDColumn).Eq(imageID)) + return count(ctx, q) +} + +func (qb *GalleryStore) FindUserGalleryByTitle(ctx context.Context, title string) ([]*models.Gallery, error) { + table := qb.table() + + sq := dialect.From(table).LeftJoin( + galleriesFilesJoinTable, + goqu.On(galleriesFilesJoinTable.Col(galleryIDColumn).Eq(table.Col(idColumn))), + ).Select(table.Col(idColumn)).Where( + table.Col("folder_id").IsNull(), + galleriesFilesJoinTable.Col("file_id").IsNull(), + table.Col("title").Eq(title), + ) + + ret, err := qb.findBySubquery(ctx, sq) + if err != nil { + return nil, fmt.Errorf("getting user galleries for title %s: %w", title, err) + } + + return ret, nil +} + +func (qb *GalleryStore) Count(ctx context.Context) (int, error) { + q := dialect.Select(goqu.COUNT("*")).From(qb.table()) + return count(ctx, q) +} + +func (qb *GalleryStore) All(ctx context.Context) ([]*models.Gallery, error) { + return qb.getMany(ctx, qb.selectDataset()) +} + +func (qb *GalleryStore) validateFilter(galleryFilter *models.GalleryFilterType) error { const and = "AND" const or = "OR" const not = "NOT" @@ -190,43 +612,78 @@ func (qb *galleryQueryBuilder) validateFilter(galleryFilter *models.GalleryFilte return nil } -func (qb *galleryQueryBuilder) makeFilter(galleryFilter *models.GalleryFilterType) *filterBuilder { +func (qb *GalleryStore) makeFilter(ctx context.Context, galleryFilter *models.GalleryFilterType) *filterBuilder { query := &filterBuilder{} if galleryFilter.And != nil { - query.and(qb.makeFilter(galleryFilter.And)) + query.and(qb.makeFilter(ctx, galleryFilter.And)) } if galleryFilter.Or != nil { - query.or(qb.makeFilter(galleryFilter.Or)) + query.or(qb.makeFilter(ctx, galleryFilter.Or)) } if galleryFilter.Not != nil { - query.not(qb.makeFilter(galleryFilter.Not)) + query.not(qb.makeFilter(ctx, galleryFilter.Not)) } - query.handleCriterion(stringCriterionHandler(galleryFilter.Title, "galleries.title")) - query.handleCriterion(stringCriterionHandler(galleryFilter.Details, 
"galleries.details")) - query.handleCriterion(stringCriterionHandler(galleryFilter.Checksum, "galleries.checksum")) - query.handleCriterion(boolCriterionHandler(galleryFilter.IsZip, "galleries.zip")) - query.handleCriterion(stringCriterionHandler(galleryFilter.Path, "galleries.path")) - query.handleCriterion(intCriterionHandler(galleryFilter.Rating, "galleries.rating")) - query.handleCriterion(stringCriterionHandler(galleryFilter.URL, "galleries.url")) - query.handleCriterion(boolCriterionHandler(galleryFilter.Organized, "galleries.organized")) - query.handleCriterion(galleryIsMissingCriterionHandler(qb, galleryFilter.IsMissing)) - query.handleCriterion(galleryTagsCriterionHandler(qb, galleryFilter.Tags)) - query.handleCriterion(galleryTagCountCriterionHandler(qb, galleryFilter.TagCount)) - query.handleCriterion(galleryPerformersCriterionHandler(qb, galleryFilter.Performers)) - query.handleCriterion(galleryPerformerCountCriterionHandler(qb, galleryFilter.PerformerCount)) - query.handleCriterion(galleryStudioCriterionHandler(qb, galleryFilter.Studios)) - query.handleCriterion(galleryPerformerTagsCriterionHandler(qb, galleryFilter.PerformerTags)) - query.handleCriterion(galleryAverageResolutionCriterionHandler(qb, galleryFilter.AverageResolution)) - query.handleCriterion(galleryImageCountCriterionHandler(qb, galleryFilter.ImageCount)) - query.handleCriterion(galleryPerformerFavoriteCriterionHandler(galleryFilter.PerformerFavorite)) - query.handleCriterion(galleryPerformerAgeCriterionHandler(galleryFilter.PerformerAge)) + query.handleCriterion(ctx, stringCriterionHandler(galleryFilter.Title, "galleries.title")) + query.handleCriterion(ctx, stringCriterionHandler(galleryFilter.Details, "galleries.details")) + + query.handleCriterion(ctx, criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) { + if galleryFilter.Checksum != nil { + qb.addGalleriesFilesTable(f) + f.addLeftJoin(fingerprintTable, "fingerprints_md5", "galleries_files.file_id = fingerprints_md5.file_id AND fingerprints_md5.type = 'md5'") + } + + stringCriterionHandler(galleryFilter.Checksum, "fingerprints_md5.fingerprint")(ctx, f) + })) + + query.handleCriterion(ctx, criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) { + if galleryFilter.IsZip != nil { + qb.addGalleriesFilesTable(f) + if *galleryFilter.IsZip { + + f.addWhere("galleries_files.file_id IS NOT NULL") + } else { + f.addWhere("galleries_files.file_id IS NULL") + } + } + })) + + query.handleCriterion(ctx, qb.galleryPathCriterionHandler(galleryFilter.Path)) + query.handleCriterion(ctx, galleryFileCountCriterionHandler(qb, galleryFilter.FileCount)) + query.handleCriterion(ctx, intCriterionHandler(galleryFilter.Rating, "galleries.rating", nil)) + query.handleCriterion(ctx, stringCriterionHandler(galleryFilter.URL, "galleries.url")) + query.handleCriterion(ctx, boolCriterionHandler(galleryFilter.Organized, "galleries.organized", nil)) + query.handleCriterion(ctx, galleryIsMissingCriterionHandler(qb, galleryFilter.IsMissing)) + query.handleCriterion(ctx, galleryTagsCriterionHandler(qb, galleryFilter.Tags)) + query.handleCriterion(ctx, galleryTagCountCriterionHandler(qb, galleryFilter.TagCount)) + query.handleCriterion(ctx, galleryPerformersCriterionHandler(qb, galleryFilter.Performers)) + query.handleCriterion(ctx, galleryPerformerCountCriterionHandler(qb, galleryFilter.PerformerCount)) + query.handleCriterion(ctx, galleryStudioCriterionHandler(qb, galleryFilter.Studios)) + query.handleCriterion(ctx, galleryPerformerTagsCriterionHandler(qb, 
galleryFilter.PerformerTags)) + query.handleCriterion(ctx, galleryAverageResolutionCriterionHandler(qb, galleryFilter.AverageResolution)) + query.handleCriterion(ctx, galleryImageCountCriterionHandler(qb, galleryFilter.ImageCount)) + query.handleCriterion(ctx, galleryPerformerFavoriteCriterionHandler(galleryFilter.PerformerFavorite)) + query.handleCriterion(ctx, galleryPerformerAgeCriterionHandler(galleryFilter.PerformerAge)) return query } -func (qb *galleryQueryBuilder) makeQuery(galleryFilter *models.GalleryFilterType, findFilter *models.FindFilterType) (*queryBuilder, error) { +func (qb *GalleryStore) addGalleriesFilesTable(f *filterBuilder) { + f.addLeftJoin(galleriesFilesTable, "", "galleries_files.gallery_id = galleries.id") +} + +func (qb *GalleryStore) addFilesTable(f *filterBuilder) { + qb.addGalleriesFilesTable(f) + f.addLeftJoin(fileTable, "", "galleries_files.file_id = files.id") +} + +func (qb *GalleryStore) addFoldersTable(f *filterBuilder) { + qb.addFilesTable(f) + f.addLeftJoin(folderTable, "", "files.parent_folder_id = folders.id") +} + +func (qb *GalleryStore) makeQuery(ctx context.Context, galleryFilter *models.GalleryFilterType, findFilter *models.FindFilterType) (*queryBuilder, error) { if galleryFilter == nil { galleryFilter = &models.GalleryFilterType{} } @@ -238,36 +695,62 @@ func (qb *galleryQueryBuilder) makeQuery(galleryFilter *models.GalleryFilterType distinctIDs(&query, galleryTable) if q := findFilter.Q; q != nil && *q != "" { - searchColumns := []string{"galleries.title", "galleries.path", "galleries.checksum"} + query.addJoins( + join{ + table: galleriesFilesTable, + onClause: "galleries_files.gallery_id = galleries.id", + }, + join{ + table: fileTable, + onClause: "galleries_files.file_id = files.id", + }, + join{ + table: folderTable, + onClause: "files.parent_folder_id = folders.id", + }, + join{ + table: fingerprintTable, + onClause: "files_fingerprints.file_id = galleries_files.file_id", + }, + join{ + table: folderTable, + as: "gallery_folder", + onClause: "galleries.folder_id = gallery_folder.id", + }, + ) + + // add joins for files and checksum + searchColumns := []string{"galleries.title", "gallery_folder.path", "folders.path", "files.basename", "files_fingerprints.fingerprint"} query.parseQueryString(searchColumns, *q) } if err := qb.validateFilter(galleryFilter); err != nil { return nil, err } - filter := qb.makeFilter(galleryFilter) + filter := qb.makeFilter(ctx, galleryFilter) query.addFilter(filter) - query.sortAndPagination = qb.getGallerySort(findFilter) + getPagination(findFilter) + qb.setGallerySort(&query, findFilter) + query.sortAndPagination += getPagination(findFilter) return &query, nil } -func (qb *galleryQueryBuilder) Query(galleryFilter *models.GalleryFilterType, findFilter *models.FindFilterType) ([]*models.Gallery, int, error) { - query, err := qb.makeQuery(galleryFilter, findFilter) +func (qb *GalleryStore) Query(ctx context.Context, galleryFilter *models.GalleryFilterType, findFilter *models.FindFilterType) ([]*models.Gallery, int, error) { + query, err := qb.makeQuery(ctx, galleryFilter, findFilter) if err != nil { return nil, 0, err } - idsResult, countResult, err := query.executeFind() + idsResult, countResult, err := query.executeFind(ctx) if err != nil { return nil, 0, err } var galleries []*models.Gallery for _, id := range idsResult { - gallery, err := qb.Find(id) + gallery, err := qb.Find(ctx, id) if err != nil { return nil, 0, err } @@ -278,17 +761,92 @@ func (qb *galleryQueryBuilder) Query(galleryFilter 
*models.GalleryFilterType, fi return galleries, countResult, nil } -func (qb *galleryQueryBuilder) QueryCount(galleryFilter *models.GalleryFilterType, findFilter *models.FindFilterType) (int, error) { - query, err := qb.makeQuery(galleryFilter, findFilter) +func (qb *GalleryStore) QueryCount(ctx context.Context, galleryFilter *models.GalleryFilterType, findFilter *models.FindFilterType) (int, error) { + query, err := qb.makeQuery(ctx, galleryFilter, findFilter) if err != nil { return 0, err } - return query.executeCount() + return query.executeCount(ctx) } -func galleryIsMissingCriterionHandler(qb *galleryQueryBuilder, isMissing *string) criterionHandlerFunc { - return func(f *filterBuilder) { +func (qb *GalleryStore) galleryPathCriterionHandler(c *models.StringCriterionInput) criterionHandlerFunc { + return func(ctx context.Context, f *filterBuilder) { + if c != nil { + qb.addFoldersTable(f) + f.addLeftJoin(folderTable, "gallery_folder", "galleries.folder_id = gallery_folder.id") + + const pathColumn = "folders.path" + const basenameColumn = "files.basename" + const folderPathColumn = "gallery_folder.path" + + addWildcards := true + not := false + + if modifier := c.Modifier; c.Modifier.IsValid() { + switch modifier { + case models.CriterionModifierIncludes: + clause := getPathSearchClause(pathColumn, basenameColumn, c.Value, addWildcards, not) + clause2 := getStringSearchClause([]string{folderPathColumn}, c.Value, false) + f.whereClauses = append(f.whereClauses, orClauses(clause, clause2)) + case models.CriterionModifierExcludes: + not = true + clause := getPathSearchClause(pathColumn, basenameColumn, c.Value, addWildcards, not) + clause2 := getStringSearchClause([]string{folderPathColumn}, c.Value, true) + f.whereClauses = append(f.whereClauses, orClauses(clause, clause2)) + case models.CriterionModifierEquals: + addWildcards = false + clause := getPathSearchClause(pathColumn, basenameColumn, c.Value, addWildcards, not) + clause2 := makeClause(folderPathColumn+" LIKE ?", c.Value) + f.whereClauses = append(f.whereClauses, orClauses(clause, clause2)) + case models.CriterionModifierNotEquals: + addWildcards = false + not = true + clause := getPathSearchClause(pathColumn, basenameColumn, c.Value, addWildcards, not) + clause2 := makeClause(folderPathColumn+" NOT LIKE ?", c.Value) + f.whereClauses = append(f.whereClauses, orClauses(clause, clause2)) + case models.CriterionModifierMatchesRegex: + if _, err := regexp.Compile(c.Value); err != nil { + f.setError(err) + return + } + clause := makeClause(fmt.Sprintf("(%s IS NOT NULL AND %[1]s regexp ?) OR (%s IS NOT NULL AND %[2]s regexp ?)", pathColumn, basenameColumn), c.Value, c.Value) + clause2 := makeClause(fmt.Sprintf("(%s IS NOT NULL AND %[1]s regexp ?)", folderPathColumn), c.Value) + f.whereClauses = append(f.whereClauses, orClauses(clause, clause2)) + case models.CriterionModifierNotMatchesRegex: + if _, err := regexp.Compile(c.Value); err != nil { + f.setError(err) + return + } + f.addWhere(fmt.Sprintf("(%s IS NULL OR %[1]s NOT regexp ?) 
AND (%s IS NULL OR %[2]s NOT regexp ?)", pathColumn, basenameColumn), c.Value, c.Value) + f.addWhere(fmt.Sprintf("(%s IS NULL OR %[1]s NOT regexp ?)", folderPathColumn), c.Value) + case models.CriterionModifierIsNull: + f.whereClauses = append(f.whereClauses, makeClause(fmt.Sprintf("(%s IS NULL OR TRIM(%[1]s) = '' OR %s IS NULL OR TRIM(%[2]s) = '')", pathColumn, basenameColumn))) + f.whereClauses = append(f.whereClauses, makeClause(fmt.Sprintf("(%s IS NULL OR TRIM(%[1]s) = '')", folderPathColumn))) + case models.CriterionModifierNotNull: + clause := makeClause(fmt.Sprintf("(%s IS NOT NULL AND TRIM(%[1]s) != '' AND %s IS NOT NULL AND TRIM(%[2]s) != '')", pathColumn, basenameColumn)) + clause2 := makeClause(fmt.Sprintf("(%s IS NOT NULL AND TRIM(%[1]s) != '')", folderPathColumn)) + f.whereClauses = append(f.whereClauses, orClauses(clause, clause2)) + default: + panic("unsupported string filter modifier") + } + } + } + } +} + +func galleryFileCountCriterionHandler(qb *GalleryStore, fileCount *models.IntCriterionInput) criterionHandlerFunc { + h := countCriterionHandlerBuilder{ + primaryTable: galleryTable, + joinTable: galleriesFilesTable, + primaryFK: galleryIDColumn, + } + + return h.handler(fileCount) +} + +func galleryIsMissingCriterionHandler(qb *GalleryStore, isMissing *string) criterionHandlerFunc { + return func(ctx context.Context, f *filterBuilder) { if isMissing != nil && *isMissing != "" { switch *isMissing { case "scenes": @@ -311,7 +869,7 @@ func galleryIsMissingCriterionHandler(qb *galleryQueryBuilder, isMissing *string } } -func galleryTagsCriterionHandler(qb *galleryQueryBuilder, tags *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { +func galleryTagsCriterionHandler(qb *GalleryStore, tags *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { h := joinedHierarchicalMultiCriterionHandlerBuilder{ tx: qb.tx, @@ -328,7 +886,7 @@ func galleryTagsCriterionHandler(qb *galleryQueryBuilder, tags *models.Hierarchi return h.handler(tags) } -func galleryTagCountCriterionHandler(qb *galleryQueryBuilder, tagCount *models.IntCriterionInput) criterionHandlerFunc { +func galleryTagCountCriterionHandler(qb *GalleryStore, tagCount *models.IntCriterionInput) criterionHandlerFunc { h := countCriterionHandlerBuilder{ primaryTable: galleryTable, joinTable: galleriesTagsTable, @@ -338,7 +896,7 @@ func galleryTagCountCriterionHandler(qb *galleryQueryBuilder, tagCount *models.I return h.handler(tagCount) } -func galleryPerformersCriterionHandler(qb *galleryQueryBuilder, performers *models.MultiCriterionInput) criterionHandlerFunc { +func galleryPerformersCriterionHandler(qb *GalleryStore, performers *models.MultiCriterionInput) criterionHandlerFunc { h := joinedMultiCriterionHandlerBuilder{ primaryTable: galleryTable, joinTable: performersGalleriesTable, @@ -354,7 +912,7 @@ func galleryPerformersCriterionHandler(qb *galleryQueryBuilder, performers *mode return h.handler(performers) } -func galleryPerformerCountCriterionHandler(qb *galleryQueryBuilder, performerCount *models.IntCriterionInput) criterionHandlerFunc { +func galleryPerformerCountCriterionHandler(qb *GalleryStore, performerCount *models.IntCriterionInput) criterionHandlerFunc { h := countCriterionHandlerBuilder{ primaryTable: galleryTable, joinTable: performersGalleriesTable, @@ -364,7 +922,7 @@ func galleryPerformerCountCriterionHandler(qb *galleryQueryBuilder, performerCou return h.handler(performerCount) } -func galleryImageCountCriterionHandler(qb *galleryQueryBuilder, imageCount *models.IntCriterionInput) 
criterionHandlerFunc { +func galleryImageCountCriterionHandler(qb *GalleryStore, imageCount *models.IntCriterionInput) criterionHandlerFunc { h := countCriterionHandlerBuilder{ primaryTable: galleryTable, joinTable: galleriesImagesTable, @@ -374,7 +932,7 @@ func galleryImageCountCriterionHandler(qb *galleryQueryBuilder, imageCount *mode return h.handler(imageCount) } -func galleryStudioCriterionHandler(qb *galleryQueryBuilder, studios *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { +func galleryStudioCriterionHandler(qb *GalleryStore, studios *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { h := hierarchicalMultiCriterionHandlerBuilder{ tx: qb.tx, @@ -388,8 +946,8 @@ func galleryStudioCriterionHandler(qb *galleryQueryBuilder, studios *models.Hier return h.handler(studios) } -func galleryPerformerTagsCriterionHandler(qb *galleryQueryBuilder, tags *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { +func galleryPerformerTagsCriterionHandler(qb *GalleryStore, tags *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { + return func(ctx context.Context, f *filterBuilder) { if tags != nil { if tags.Modifier == models.CriterionModifierIsNull || tags.Modifier == models.CriterionModifierNotNull { var notClause string @@ -408,7 +966,7 @@ func galleryPerformerTagsCriterionHandler(qb *galleryQueryBuilder, tags *models. return } - valuesClause := getHierarchicalValues(qb.tx, tags.Value, tagTable, "tags_relations", "", tags.Depth) + valuesClause := getHierarchicalValues(ctx, qb.tx, tags.Value, tagTable, "tags_relations", "", tags.Depth) f.addWith(`performer_tags AS ( SELECT pg.gallery_id, t.column1 AS root_tag_id FROM performers_galleries pg @@ -424,7 +982,7 @@ INNER JOIN (` + valuesClause + `) t ON t.column2 = pt.tag_id } func galleryPerformerFavoriteCriterionHandler(performerfavorite *bool) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if performerfavorite != nil { f.addLeftJoin("performers_galleries", "", "galleries.id = performers_galleries.gallery_id") @@ -444,7 +1002,7 @@ GROUP BY performers_galleries.gallery_id HAVING SUM(performers.favorite) = 0)`, } func galleryPerformerAgeCriterionHandler(performerAge *models.IntCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if performerAge != nil { f.addInnerJoin("performers_galleries", "", "galleries.id = performers_galleries.gallery_id") f.addInnerJoin("performers", "", "performers_galleries.performer_id = performers.id") @@ -460,16 +1018,18 @@ func galleryPerformerAgeCriterionHandler(performerAge *models.IntCriterionInput) } } -func galleryAverageResolutionCriterionHandler(qb *galleryQueryBuilder, resolution *models.ResolutionCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { +func galleryAverageResolutionCriterionHandler(qb *GalleryStore, resolution *models.ResolutionCriterionInput) criterionHandlerFunc { + return func(ctx context.Context, f *filterBuilder) { if resolution != nil && resolution.Value.IsValid() { qb.imagesRepository().join(f, "images_join", "galleries.id") f.addLeftJoin("images", "", "images_join.image_id = images.id") + f.addLeftJoin("images_files", "", "images.id = images_files.image_id") + f.addLeftJoin("image_files", "", "images_files.file_id = image_files.file_id") min := resolution.Value.GetMinResolution() max := resolution.Value.GetMaxResolution() - const widthHeight = 
"avg(MIN(images.width, images.height))" + const widthHeight = "avg(MIN(image_files.width, image_files.height))" switch resolution.Modifier { case models.CriterionModifierEquals: @@ -485,44 +1045,76 @@ func galleryAverageResolutionCriterionHandler(qb *galleryQueryBuilder, resolutio } } -func (qb *galleryQueryBuilder) getGallerySort(findFilter *models.FindFilterType) string { +func (qb *GalleryStore) setGallerySort(query *queryBuilder, findFilter *models.FindFilterType) { if findFilter == nil || findFilter.Sort == nil || *findFilter.Sort == "" { - return "" + return } sort := findFilter.GetSort("path") direction := findFilter.GetDirection() + addFileTable := func() { + query.addJoins( + join{ + table: galleriesFilesTable, + onClause: "galleries_files.gallery_id = galleries.id", + }, + join{ + table: fileTable, + onClause: "galleries_files.file_id = files.id", + }, + ) + } + switch sort { + case "file_count": + query.sortAndPagination += getCountSort(galleryTable, galleriesFilesTable, galleryIDColumn, direction) case "images_count": - return getCountSort(galleryTable, galleriesImagesTable, galleryIDColumn, direction) + query.sortAndPagination += getCountSort(galleryTable, galleriesImagesTable, galleryIDColumn, direction) case "tag_count": - return getCountSort(galleryTable, galleriesTagsTable, galleryIDColumn, direction) + query.sortAndPagination += getCountSort(galleryTable, galleriesTagsTable, galleryIDColumn, direction) case "performer_count": - return getCountSort(galleryTable, performersGalleriesTable, galleryIDColumn, direction) + query.sortAndPagination += getCountSort(galleryTable, performersGalleriesTable, galleryIDColumn, direction) + case "path": + // special handling for path + addFileTable() + query.addJoins( + join{ + table: folderTable, + onClause: "folders.id = galleries.folder_id", + }, + join{ + table: folderTable, + as: "file_folder", + onClause: "files.parent_folder_id = file_folder.id", + }, + ) + query.sortAndPagination += fmt.Sprintf(" ORDER BY folders.path %s, file_folder.path %[1]s, files.basename %[1]s", direction) + case "file_mod_time": + sort = "mod_time" + addFileTable() + query.sortAndPagination += getSort(sort, direction, fileTable) default: - return getSort(sort, direction, "galleries") + query.sortAndPagination += getSort(sort, direction, "galleries") } } -func (qb *galleryQueryBuilder) queryGallery(query string, args []interface{}) (*models.Gallery, error) { - results, err := qb.queryGalleries(query, args) - if err != nil || len(results) < 1 { - return nil, err +func (qb *GalleryStore) filesRepository() *filesRepository { + return &filesRepository{ + repository: repository{ + tx: qb.tx, + tableName: galleriesFilesTable, + idColumn: galleryIDColumn, + }, } - return results[0], nil } -func (qb *galleryQueryBuilder) queryGalleries(query string, args []interface{}) ([]*models.Gallery, error) { - var ret models.Galleries - if err := qb.query(query, args, &ret); err != nil { - return nil, err - } - - return []*models.Gallery(ret), nil +func (qb *GalleryStore) AddFileID(ctx context.Context, id int, fileID file.ID) error { + const firstPrimary = false + return galleriesFilesTableMgr.insertJoins(ctx, id, firstPrimary, []file.ID{fileID}) } -func (qb *galleryQueryBuilder) performersRepository() *joinRepository { +func (qb *GalleryStore) performersRepository() *joinRepository { return &joinRepository{ repository: repository{ tx: qb.tx, @@ -533,16 +1125,11 @@ func (qb *galleryQueryBuilder) performersRepository() *joinRepository { } } -func (qb *galleryQueryBuilder) 
GetPerformerIDs(galleryID int) ([]int, error) { - return qb.performersRepository().getIDs(galleryID) +func (qb *GalleryStore) GetPerformerIDs(ctx context.Context, id int) ([]int, error) { + return qb.performersRepository().getIDs(ctx, id) } -func (qb *galleryQueryBuilder) UpdatePerformers(galleryID int, performerIDs []int) error { - // Delete the existing joins and then create new ones - return qb.performersRepository().replace(galleryID, performerIDs) -} - -func (qb *galleryQueryBuilder) tagsRepository() *joinRepository { +func (qb *GalleryStore) tagsRepository() *joinRepository { return &joinRepository{ repository: repository{ tx: qb.tx, @@ -553,16 +1140,11 @@ func (qb *galleryQueryBuilder) tagsRepository() *joinRepository { } } -func (qb *galleryQueryBuilder) GetTagIDs(galleryID int) ([]int, error) { - return qb.tagsRepository().getIDs(galleryID) +func (qb *GalleryStore) GetTagIDs(ctx context.Context, id int) ([]int, error) { + return qb.tagsRepository().getIDs(ctx, id) } -func (qb *galleryQueryBuilder) UpdateTags(galleryID int, tagIDs []int) error { - // Delete the existing joins and then create new ones - return qb.tagsRepository().replace(galleryID, tagIDs) -} - -func (qb *galleryQueryBuilder) imagesRepository() *joinRepository { +func (qb *GalleryStore) imagesRepository() *joinRepository { return &joinRepository{ repository: repository{ tx: qb.tx, @@ -573,16 +1155,16 @@ func (qb *galleryQueryBuilder) imagesRepository() *joinRepository { } } -func (qb *galleryQueryBuilder) GetImageIDs(galleryID int) ([]int, error) { - return qb.imagesRepository().getIDs(galleryID) +func (qb *GalleryStore) GetImageIDs(ctx context.Context, galleryID int) ([]int, error) { + return qb.imagesRepository().getIDs(ctx, galleryID) } -func (qb *galleryQueryBuilder) UpdateImages(galleryID int, imageIDs []int) error { +func (qb *GalleryStore) UpdateImages(ctx context.Context, galleryID int, imageIDs []int) error { // Delete the existing joins and then create new ones - return qb.imagesRepository().replace(galleryID, imageIDs) + return qb.imagesRepository().replace(ctx, galleryID, imageIDs) } -func (qb *galleryQueryBuilder) scenesRepository() *joinRepository { +func (qb *GalleryStore) scenesRepository() *joinRepository { return &joinRepository{ repository: repository{ tx: qb.tx, @@ -593,11 +1175,6 @@ func (qb *galleryQueryBuilder) scenesRepository() *joinRepository { } } -func (qb *galleryQueryBuilder) GetSceneIDs(galleryID int) ([]int, error) { - return qb.scenesRepository().getIDs(galleryID) -} - -func (qb *galleryQueryBuilder) UpdateScenes(galleryID int, sceneIDs []int) error { - // Delete the existing joins and then create new ones - return qb.scenesRepository().replace(galleryID, sceneIDs) +func (qb *GalleryStore) GetSceneIDs(ctx context.Context, id int) ([]int, error) { + return qb.scenesRepository().getIDs(ctx, id) } diff --git a/pkg/sqlite/gallery_test.go b/pkg/sqlite/gallery_test.go index f9aa9ef5e..ed82df7e7 100644 --- a/pkg/sqlite/gallery_test.go +++ b/pkg/sqlite/gallery_test.go @@ -4,140 +4,1398 @@ package sqlite_test import ( + "context" "math" "strconv" "testing" + "time" - "github.com/stretchr/testify/assert" - + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/models" + "github.com/stretchr/testify/assert" ) -func TestGalleryFind(t *testing.T) { - withTxn(func(r models.Repository) error { - gqb := r.Gallery() +var invalidID = -1 - const galleryIdx = 0 - gallery, err := gqb.Find(galleryIDs[galleryIdx]) - - if err != nil { - t.Errorf("Error finding gallery: %s", err.Error()) +func 
loadGalleryRelationships(ctx context.Context, expected models.Gallery, actual *models.Gallery) error { + if expected.SceneIDs.Loaded() { + if err := actual.LoadSceneIDs(ctx, db.Gallery); err != nil { + return err } - - assert.Equal(t, getGalleryStringValue(galleryIdx, "Path"), gallery.Path.String) - - gallery, err = gqb.Find(0) - - if err != nil { - t.Errorf("Error finding gallery: %s", err.Error()) + } + if expected.TagIDs.Loaded() { + if err := actual.LoadTagIDs(ctx, db.Gallery); err != nil { + return err } + } + if expected.PerformerIDs.Loaded() { + if err := actual.LoadPerformerIDs(ctx, db.Gallery); err != nil { + return err + } + } + if expected.Files.Loaded() { + if err := actual.LoadFiles(ctx, db.Gallery); err != nil { + return err + } + } - assert.Nil(t, gallery) + // clear Path, Checksum, PrimaryFileID + if expected.Path == "" { + actual.Path = "" + } + if expected.PrimaryFileID == nil { + actual.PrimaryFileID = nil + } - return nil - }) + return nil } -func TestGalleryFindByChecksum(t *testing.T) { - withTxn(func(r models.Repository) error { - gqb := r.Gallery() +func Test_galleryQueryBuilder_Create(t *testing.T) { + var ( + title = "title" + url = "url" + rating = 3 + details = "details" + createdAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + updatedAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) - const galleryIdx = 0 - galleryChecksum := getGalleryStringValue(galleryIdx, "Checksum") - gallery, err := gqb.FindByChecksum(galleryChecksum) + galleryFile = makeFileWithID(fileIdxStartGalleryFiles) + ) - if err != nil { - t.Errorf("Error finding gallery: %s", err.Error()) - } + date := models.NewDate("2003-02-01") - assert.Equal(t, getGalleryStringValue(galleryIdx, "Path"), gallery.Path.String) + tests := []struct { + name string + newObject models.Gallery + wantErr bool + }{ + { + "full", + models.Gallery{ + Title: title, + URL: url, + Date: &date, + Details: details, + Rating: &rating, + Organized: true, + StudioID: &studioIDs[studioIdxWithScene], + CreatedAt: createdAt, + UpdatedAt: updatedAt, + SceneIDs: models.NewRelatedIDs([]int{sceneIDs[sceneIdx1WithPerformer], sceneIDs[sceneIdx1WithStudio]}), + TagIDs: models.NewRelatedIDs([]int{tagIDs[tagIdx1WithScene], tagIDs[tagIdx1WithDupName]}), + PerformerIDs: models.NewRelatedIDs([]int{performerIDs[performerIdx1WithScene], performerIDs[performerIdx1WithDupName]}), + }, + false, + }, + { + "with file", + models.Gallery{ + Title: title, + URL: url, + Date: &date, + Details: details, + Rating: &rating, + Organized: true, + StudioID: &studioIDs[studioIdxWithScene], + Files: models.NewRelatedFiles([]file.File{ + galleryFile, + }), + CreatedAt: createdAt, + UpdatedAt: updatedAt, + SceneIDs: models.NewRelatedIDs([]int{sceneIDs[sceneIdx1WithPerformer], sceneIDs[sceneIdx1WithStudio]}), + TagIDs: models.NewRelatedIDs([]int{tagIDs[tagIdx1WithScene], tagIDs[tagIdx1WithDupName]}), + PerformerIDs: models.NewRelatedIDs([]int{performerIDs[performerIdx1WithScene], performerIDs[performerIdx1WithDupName]}), + }, + false, + }, + { + "invalid studio id", + models.Gallery{ + StudioID: &invalidID, + }, + true, + }, + { + "invalid scene id", + models.Gallery{ + SceneIDs: models.NewRelatedIDs([]int{invalidID}), + }, + true, + }, + { + "invalid tag id", + models.Gallery{ + TagIDs: models.NewRelatedIDs([]int{invalidID}), + }, + true, + }, + { + "invalid performer id", + models.Gallery{ + PerformerIDs: models.NewRelatedIDs([]int{invalidID}), + }, + true, + }, + } - galleryChecksum = "not exist" - gallery, err = gqb.FindByChecksum(galleryChecksum) + qb := 
db.Gallery - if err != nil { - t.Errorf("Error finding gallery: %s", err.Error()) - } + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) - assert.Nil(t, gallery) + s := tt.newObject + var fileIDs []file.ID + if s.Files.Loaded() { + fileIDs = []file.ID{s.Files.List()[0].Base().ID} + } - return nil - }) + if err := qb.Create(ctx, &s, fileIDs); (err != nil) != tt.wantErr { + t.Errorf("galleryQueryBuilder.Create() error = %v, wantErr = %v", err, tt.wantErr) + } + + if tt.wantErr { + assert.Zero(s.ID) + return + } + + assert.NotZero(s.ID) + + copy := tt.newObject + copy.ID = s.ID + + // load relationships + if err := loadGalleryRelationships(ctx, copy, &s); err != nil { + t.Errorf("loadGalleryRelationships() error = %v", err) + return + } + + assert.Equal(copy, s) + + // ensure can find the scene + found, err := qb.Find(ctx, s.ID) + if err != nil { + t.Errorf("galleryQueryBuilder.Find() error = %v", err) + } + + if !assert.NotNil(found) { + return + } + + // load relationships + if err := loadGalleryRelationships(ctx, copy, found); err != nil { + t.Errorf("loadGalleryRelationships() error = %v", err) + return + } + + assert.Equal(copy, *found) + + return + }) + } } -func TestGalleryFindByPath(t *testing.T) { - withTxn(func(r models.Repository) error { - gqb := r.Gallery() - - const galleryIdx = 0 - galleryPath := getGalleryStringValue(galleryIdx, "Path") - gallery, err := gqb.FindByPath(galleryPath) - - if err != nil { - t.Errorf("Error finding gallery: %s", err.Error()) - } - - assert.Equal(t, galleryPath, gallery.Path.String) - - galleryPath = "not exist" - gallery, err = gqb.FindByPath(galleryPath) - - if err != nil { - t.Errorf("Error finding gallery: %s", err.Error()) - } - - assert.Nil(t, gallery) - - return nil - }) +func makeGalleryFileWithID(i int) *file.BaseFile { + ret := makeGalleryFile(i) + ret.ID = galleryFileIDs[i] + return ret } -func TestGalleryFindBySceneID(t *testing.T) { - withTxn(func(r models.Repository) error { - gqb := r.Gallery() +func Test_galleryQueryBuilder_Update(t *testing.T) { + var ( + title = "title" + url = "url" + rating = 3 + details = "details" + createdAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + updatedAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + ) - sceneID := sceneIDs[sceneIdxWithGallery] - galleries, err := gqb.FindBySceneID(sceneID) + date := models.NewDate("2003-02-01") - if err != nil { - t.Errorf("Error finding gallery: %s", err.Error()) + tests := []struct { + name string + updatedObject *models.Gallery + wantErr bool + }{ + { + "full", + &models.Gallery{ + ID: galleryIDs[galleryIdxWithScene], + Title: title, + URL: url, + Date: &date, + Details: details, + Rating: &rating, + Organized: true, + StudioID: &studioIDs[studioIdxWithScene], + Files: models.NewRelatedFiles([]file.File{ + makeGalleryFileWithID(galleryIdxWithScene), + }), + CreatedAt: createdAt, + UpdatedAt: updatedAt, + SceneIDs: models.NewRelatedIDs([]int{sceneIDs[sceneIdx1WithPerformer], sceneIDs[sceneIdx1WithStudio]}), + TagIDs: models.NewRelatedIDs([]int{tagIDs[tagIdx1WithScene], tagIDs[tagIdx1WithDupName]}), + PerformerIDs: models.NewRelatedIDs([]int{performerIDs[performerIdx1WithScene], performerIDs[performerIdx1WithDupName]}), + }, + false, + }, + { + "clear nullables", + &models.Gallery{ + ID: galleryIDs[galleryIdxWithImage], + SceneIDs: models.NewRelatedIDs([]int{}), + TagIDs: models.NewRelatedIDs([]int{}), + PerformerIDs: models.NewRelatedIDs([]int{}), + Organized: true, + CreatedAt: 
createdAt, + UpdatedAt: updatedAt, + }, + false, + }, + { + "clear scene ids", + &models.Gallery{ + ID: galleryIDs[galleryIdxWithScene], + SceneIDs: models.NewRelatedIDs([]int{}), + TagIDs: models.NewRelatedIDs([]int{}), + PerformerIDs: models.NewRelatedIDs([]int{}), + Organized: true, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + false, + }, + { + "clear tag ids", + &models.Gallery{ + ID: galleryIDs[galleryIdxWithTag], + SceneIDs: models.NewRelatedIDs([]int{}), + TagIDs: models.NewRelatedIDs([]int{}), + PerformerIDs: models.NewRelatedIDs([]int{}), + Organized: true, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + false, + }, + { + "clear performer ids", + &models.Gallery{ + ID: galleryIDs[galleryIdxWithPerformer], + SceneIDs: models.NewRelatedIDs([]int{}), + TagIDs: models.NewRelatedIDs([]int{}), + PerformerIDs: models.NewRelatedIDs([]int{}), + Organized: true, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + false, + }, + { + "invalid studio id", + &models.Gallery{ + ID: galleryIDs[galleryIdxWithImage], + Organized: true, + StudioID: &invalidID, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + true, + }, + { + "invalid scene id", + &models.Gallery{ + ID: galleryIDs[galleryIdxWithImage], + Organized: true, + SceneIDs: models.NewRelatedIDs([]int{invalidID}), + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + true, + }, + { + "invalid tag id", + &models.Gallery{ + ID: galleryIDs[galleryIdxWithImage], + Organized: true, + TagIDs: models.NewRelatedIDs([]int{invalidID}), + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + true, + }, + { + "invalid performer id", + &models.Gallery{ + ID: galleryIDs[galleryIdxWithImage], + Organized: true, + PerformerIDs: models.NewRelatedIDs([]int{invalidID}), + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + true, + }, + } + + qb := db.Gallery + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + + copy := *tt.updatedObject + + if err := qb.Update(ctx, tt.updatedObject); (err != nil) != tt.wantErr { + t.Errorf("galleryQueryBuilder.Update() error = %v, wantErr %v", err, tt.wantErr) + } + + if tt.wantErr { + return + } + + s, err := qb.Find(ctx, tt.updatedObject.ID) + if err != nil { + t.Errorf("galleryQueryBuilder.Find() error = %v", err) + return + } + + // load relationships + if err := loadGalleryRelationships(ctx, copy, s); err != nil { + t.Errorf("loadGalleryRelationships() error = %v", err) + return + } + + assert.Equal(copy, *s) + + return + }) + } +} + +func clearGalleryFileIDs(gallery *models.Gallery) { + if gallery.Files.Loaded() { + for _, f := range gallery.Files.List() { + f.Base().ID = 0 } + } +} - assert.Equal(t, getGalleryStringValue(galleryIdxWithScene, "Path"), galleries[0].Path.String) +func clearGalleryPartial() models.GalleryPartial { + // leave mandatory fields + return models.GalleryPartial{ + Title: models.OptionalString{Set: true, Null: true}, + Details: models.OptionalString{Set: true, Null: true}, + URL: models.OptionalString{Set: true, Null: true}, + Date: models.OptionalDate{Set: true, Null: true}, + Rating: models.OptionalInt{Set: true, Null: true}, + StudioID: models.OptionalInt{Set: true, Null: true}, + TagIDs: &models.UpdateIDs{Mode: models.RelationshipUpdateModeSet}, + PerformerIDs: &models.UpdateIDs{Mode: models.RelationshipUpdateModeSet}, + } +} - galleries, err = gqb.FindBySceneID(0) +func Test_galleryQueryBuilder_UpdatePartial(t *testing.T) { + var ( + title = "title" + details = "details" + url = "url" + 
rating = 3 + createdAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + updatedAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) - if err != nil { - t.Errorf("Error finding gallery: %s", err.Error()) + date = models.NewDate("2003-02-01") + ) + + tests := []struct { + name string + id int + partial models.GalleryPartial + want models.Gallery + wantErr bool + }{ + { + "full", + galleryIDs[galleryIdxWithImage], + models.GalleryPartial{ + Title: models.NewOptionalString(title), + Details: models.NewOptionalString(details), + URL: models.NewOptionalString(url), + Date: models.NewOptionalDate(date), + Rating: models.NewOptionalInt(rating), + Organized: models.NewOptionalBool(true), + StudioID: models.NewOptionalInt(studioIDs[studioIdxWithGallery]), + CreatedAt: models.NewOptionalTime(createdAt), + UpdatedAt: models.NewOptionalTime(updatedAt), + + SceneIDs: &models.UpdateIDs{ + IDs: []int{sceneIDs[sceneIdxWithGallery]}, + Mode: models.RelationshipUpdateModeSet, + }, + TagIDs: &models.UpdateIDs{ + IDs: []int{tagIDs[tagIdx1WithGallery], tagIDs[tagIdx1WithDupName]}, + Mode: models.RelationshipUpdateModeSet, + }, + PerformerIDs: &models.UpdateIDs{ + IDs: []int{performerIDs[performerIdx1WithGallery], performerIDs[performerIdx1WithDupName]}, + Mode: models.RelationshipUpdateModeSet, + }, + }, + models.Gallery{ + ID: galleryIDs[galleryIdxWithImage], + Title: title, + Details: details, + URL: url, + Date: &date, + Rating: &rating, + Organized: true, + StudioID: &studioIDs[studioIdxWithGallery], + Files: models.NewRelatedFiles([]file.File{ + makeGalleryFile(galleryIdxWithImage), + }), + CreatedAt: createdAt, + UpdatedAt: updatedAt, + SceneIDs: models.NewRelatedIDs([]int{sceneIDs[sceneIdxWithGallery]}), + TagIDs: models.NewRelatedIDs([]int{tagIDs[tagIdx1WithGallery], tagIDs[tagIdx1WithDupName]}), + PerformerIDs: models.NewRelatedIDs([]int{performerIDs[performerIdx1WithGallery], performerIDs[performerIdx1WithDupName]}), + }, + false, + }, + { + "clear all", + galleryIDs[galleryIdxWithImage], + clearGalleryPartial(), + models.Gallery{ + ID: galleryIDs[galleryIdxWithImage], + Files: models.NewRelatedFiles([]file.File{ + makeGalleryFile(galleryIdxWithImage), + }), + SceneIDs: models.NewRelatedIDs([]int{}), + TagIDs: models.NewRelatedIDs([]int{}), + PerformerIDs: models.NewRelatedIDs([]int{}), + }, + false, + }, + { + "invalid id", + invalidID, + models.GalleryPartial{}, + models.Gallery{}, + true, + }, + } + for _, tt := range tests { + qb := db.Gallery + + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + + got, err := qb.UpdatePartial(ctx, tt.id, tt.partial) + if (err != nil) != tt.wantErr { + t.Errorf("galleryQueryBuilder.UpdatePartial() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if tt.wantErr { + return + } + + // load relationships + if err := loadGalleryRelationships(ctx, tt.want, got); err != nil { + t.Errorf("loadGalleryRelationships() error = %v", err) + return + } + clearGalleryFileIDs(got) + assert.Equal(tt.want, *got) + + s, err := qb.Find(ctx, tt.id) + if err != nil { + t.Errorf("galleryQueryBuilder.Find() error = %v", err) + } + + // load relationships + if err := loadGalleryRelationships(ctx, tt.want, s); err != nil { + t.Errorf("loadGalleryRelationships() error = %v", err) + return + } + clearGalleryFileIDs(s) + assert.Equal(tt.want, *s) + }) + } +} + +func Test_galleryQueryBuilder_UpdatePartialRelationships(t *testing.T) { + tests := []struct { + name string + id int + partial models.GalleryPartial + want models.Gallery + wantErr 
+
+func Test_galleryQueryBuilder_UpdatePartialRelationships(t *testing.T) {
+	tests := []struct {
+		name    string
+		id      int
+		partial models.GalleryPartial
+		want    models.Gallery
+		wantErr bool
+	}{
+		{
+			"add scenes",
+			galleryIDs[galleryIdx1WithImage],
+			models.GalleryPartial{
+				SceneIDs: &models.UpdateIDs{
+					IDs:  []int{sceneIDs[sceneIdx1WithStudio], sceneIDs[sceneIdx1WithPerformer]},
+					Mode: models.RelationshipUpdateModeAdd,
+				},
+			},
+			models.Gallery{
+				SceneIDs: models.NewRelatedIDs(append(indexesToIDs(sceneIDs, sceneGalleries.reverseLookup(galleryIdx1WithImage)),
+					sceneIDs[sceneIdx1WithStudio],
+					sceneIDs[sceneIdx1WithPerformer],
+				)),
+			},
+			false,
+		},
+		{
+			"add tags",
+			galleryIDs[galleryIdxWithTwoTags],
+			models.GalleryPartial{
+				TagIDs: &models.UpdateIDs{
+					IDs:  []int{tagIDs[tagIdx1WithDupName], tagIDs[tagIdx1WithImage]},
+					Mode: models.RelationshipUpdateModeAdd,
+				},
+			},
+			models.Gallery{
+				TagIDs: models.NewRelatedIDs(append(indexesToIDs(tagIDs, galleryTags[galleryIdxWithTwoTags]),
+					tagIDs[tagIdx1WithDupName],
+					tagIDs[tagIdx1WithImage],
+				)),
+			},
+			false,
+		},
+		{
+			"add performers",
+			galleryIDs[galleryIdxWithTwoPerformers],
+			models.GalleryPartial{
+				PerformerIDs: &models.UpdateIDs{
+					IDs:  []int{performerIDs[performerIdx1WithDupName], performerIDs[performerIdx1WithImage]},
+					Mode: models.RelationshipUpdateModeAdd,
+				},
+			},
+			models.Gallery{
+				PerformerIDs: models.NewRelatedIDs(append(indexesToIDs(performerIDs, galleryPerformers[galleryIdxWithTwoPerformers]),
+					performerIDs[performerIdx1WithDupName],
+					performerIDs[performerIdx1WithImage],
+				)),
+			},
+			false,
+		},
+		{
+			"add duplicate scenes",
+			galleryIDs[galleryIdxWithScene],
+			models.GalleryPartial{
+				SceneIDs: &models.UpdateIDs{
+					IDs:  []int{sceneIDs[sceneIdxWithGallery], sceneIDs[sceneIdx1WithPerformer]},
+					Mode: models.RelationshipUpdateModeAdd,
+				},
+			},
+			models.Gallery{
+				SceneIDs: models.NewRelatedIDs(append(indexesToIDs(sceneIDs, sceneGalleries.reverseLookup(galleryIdxWithScene)),
+					sceneIDs[sceneIdx1WithPerformer],
+				)),
+			},
+			false,
+		},
+		{
+			"add duplicate tags",
+			galleryIDs[galleryIdxWithTwoTags],
+			models.GalleryPartial{
+				TagIDs: &models.UpdateIDs{
+					IDs:  []int{tagIDs[tagIdx1WithGallery], tagIDs[tagIdx1WithScene]},
+					Mode: models.RelationshipUpdateModeAdd,
+				},
+			},
+			models.Gallery{
+				TagIDs: models.NewRelatedIDs(append(indexesToIDs(tagIDs, galleryTags[galleryIdxWithTwoTags]),
+					tagIDs[tagIdx1WithScene],
+				)),
+			},
+			false,
+		},
+		{
+			"add duplicate performers",
+			galleryIDs[galleryIdxWithTwoPerformers],
+			models.GalleryPartial{
+				PerformerIDs: &models.UpdateIDs{
+					IDs:  []int{performerIDs[performerIdx1WithGallery], performerIDs[performerIdx1WithScene]},
+					Mode: models.RelationshipUpdateModeAdd,
+				},
+			},
+			models.Gallery{
+				PerformerIDs: models.NewRelatedIDs(append(indexesToIDs(performerIDs, galleryPerformers[galleryIdxWithTwoPerformers]),
+					performerIDs[performerIdx1WithScene],
+				)),
+			},
+			false,
+		},
+		{
+			"add invalid scenes",
+			galleryIDs[galleryIdxWithScene],
+			models.GalleryPartial{
+				SceneIDs: &models.UpdateIDs{
+					IDs:  []int{invalidID},
+					Mode: models.RelationshipUpdateModeAdd,
+				},
+			},
+			models.Gallery{},
+			true,
+		},
+		{
+			"add invalid tags",
+			galleryIDs[galleryIdxWithTwoTags],
+			models.GalleryPartial{
+				TagIDs: &models.UpdateIDs{
+					IDs:  []int{invalidID},
+					Mode: models.RelationshipUpdateModeAdd,
+				},
+			},
+			models.Gallery{},
+			true,
+		},
+		{
+			"add invalid performers",
+			galleryIDs[galleryIdxWithTwoPerformers],
+			models.GalleryPartial{
+				PerformerIDs: &models.UpdateIDs{
+					IDs:  []int{invalidID},
+					Mode: models.RelationshipUpdateModeAdd,
+				},
+			},
+			models.Gallery{},
+			true,
+		},
+		{
+			"remove scenes",
+			galleryIDs[galleryIdxWithScene],
+			models.GalleryPartial{
+				SceneIDs: &models.UpdateIDs{
+					IDs:  []int{sceneIDs[sceneIdxWithGallery]},
+					Mode: models.RelationshipUpdateModeRemove,
+				},
+			},
+			models.Gallery{
+				SceneIDs: models.NewRelatedIDs([]int{}),
+			},
+			false,
+		},
+		{
+			"remove tags",
+			galleryIDs[galleryIdxWithTwoTags],
+			models.GalleryPartial{
+				TagIDs: &models.UpdateIDs{
+					IDs:  []int{tagIDs[tagIdx1WithGallery]},
+					Mode: models.RelationshipUpdateModeRemove,
+				},
+			},
+			models.Gallery{
+				TagIDs: models.NewRelatedIDs([]int{tagIDs[tagIdx2WithGallery]}),
+			},
+			false,
+		},
+		{
+			"remove performers",
+			galleryIDs[galleryIdxWithTwoPerformers],
+			models.GalleryPartial{
+				PerformerIDs: &models.UpdateIDs{
+					IDs:  []int{performerIDs[performerIdx1WithGallery]},
+					Mode: models.RelationshipUpdateModeRemove,
+				},
+			},
+			models.Gallery{
+				PerformerIDs: models.NewRelatedIDs([]int{performerIDs[performerIdx2WithGallery]}),
+			},
+			false,
+		},
+		{
+			"remove unrelated scenes",
+			galleryIDs[galleryIdxWithScene],
+			models.GalleryPartial{
+				SceneIDs: &models.UpdateIDs{
+					IDs:  []int{sceneIDs[sceneIdx1WithPerformer]},
+					Mode: models.RelationshipUpdateModeRemove,
+				},
+			},
+			models.Gallery{
+				SceneIDs: models.NewRelatedIDs([]int{sceneIDs[sceneIdxWithGallery]}),
+			},
+			false,
+		},
+		{
+			"remove unrelated tags",
+			galleryIDs[galleryIdxWithTwoTags],
+			models.GalleryPartial{
+				TagIDs: &models.UpdateIDs{
+					IDs:  []int{tagIDs[tagIdx1WithPerformer]},
+					Mode: models.RelationshipUpdateModeRemove,
+				},
+			},
+			models.Gallery{
+				TagIDs: models.NewRelatedIDs(indexesToIDs(tagIDs, galleryTags[galleryIdxWithTwoTags])),
+			},
+			false,
+		},
+		{
+			"remove unrelated performers",
+			galleryIDs[galleryIdxWithTwoPerformers],
+			models.GalleryPartial{
+				PerformerIDs: &models.UpdateIDs{
+					IDs:  []int{performerIDs[performerIdx1WithDupName]},
+					Mode: models.RelationshipUpdateModeRemove,
+				},
+			},
+			models.Gallery{
+				PerformerIDs: models.NewRelatedIDs(indexesToIDs(performerIDs, galleryPerformers[galleryIdxWithTwoPerformers])),
+			},
+			false,
+		},
+	}
+
+	for _, tt := range tests {
+		qb := db.Gallery
+
+		runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) {
+			assert := assert.New(t)
+
+			got, err := qb.UpdatePartial(ctx, tt.id, tt.partial)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("galleryQueryBuilder.UpdatePartial() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+
+			if tt.wantErr {
+				return
+			}
+
+			s, err := qb.Find(ctx, tt.id)
+			if err != nil {
+				t.Errorf("galleryQueryBuilder.Find() error = %v", err)
+			}
+
+			// load relationships
+			if err := loadGalleryRelationships(ctx, tt.want, got); err != nil {
+				t.Errorf("loadGalleryRelationships() error = %v", err)
+				return
+			}
+			if err := loadGalleryRelationships(ctx, tt.want, s); err != nil {
+				t.Errorf("loadGalleryRelationships() error = %v", err)
+				return
+			}
+
+			// only compare fields that were in the partial
+			if tt.partial.PerformerIDs != nil {
+				assert.Equal(tt.want.PerformerIDs, got.PerformerIDs)
+				assert.Equal(tt.want.PerformerIDs, s.PerformerIDs)
+			}
+			if tt.partial.TagIDs != nil {
+				assert.Equal(tt.want.TagIDs, got.TagIDs)
+				assert.Equal(tt.want.TagIDs, s.TagIDs)
+			}
+			if tt.partial.SceneIDs != nil {
+				assert.Equal(tt.want.SceneIDs, got.SceneIDs)
+				assert.Equal(tt.want.SceneIDs, s.SceneIDs)
+			}
+		})
+	}
+}
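The cases above pin down the three RelationshipUpdateMode values. Here is a plain-Go sketch of the semantics the tests expect; the store actually implements this with SQL against the join tables, applyUpdateIDs is a hypothetical helper, and the intslice utilities from pkg/sliceutil are assumed.

// applyUpdateIDs illustrates what each mode is expected to do
func applyUpdateIDs(existing []int, u *models.UpdateIDs) []int {
	switch u.Mode {
	case models.RelationshipUpdateModeSet:
		// replace the list wholesale
		return u.IDs
	case models.RelationshipUpdateModeAdd:
		// append, ignoring ids already present (the "add duplicate" cases)
		out := existing
		for _, id := range u.IDs {
			if !intslice.IntInclude(out, id) {
				out = append(out, id)
			}
		}
		return out
	case models.RelationshipUpdateModeRemove:
		// drop matching ids; unrelated ids are a no-op (the "remove unrelated" cases)
		var out []int
		for _, id := range existing {
			if !intslice.IntInclude(u.IDs, id) {
				out = append(out, id)
			}
		}
		return out
	}
	return existing
}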
+
+func Test_galleryQueryBuilder_Destroy(t *testing.T) {
+	tests := []struct {
+		name    string
+		id      int
+		wantErr bool
+	}{
+		{
+			"valid",
+			galleryIDs[galleryIdxWithScene],
+			false,
+		},
+		{
+			"invalid",
+			invalidID,
+			true,
+		},
+	}
+
+	qb := db.Gallery
+
+	for _, tt := range tests {
+		runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) {
+			assert := assert.New(t)
+
+			if err := qb.Destroy(ctx, tt.id); (err != nil) != tt.wantErr {
+				t.Errorf("galleryQueryBuilder.Destroy() error = %v, wantErr %v", err, tt.wantErr)
+			}
+
+			// ensure cannot be found
+			i, err := qb.Find(ctx, tt.id)
+
+			assert.NotNil(err)
+			assert.Nil(i)
+		})
+	}
+}
+
+func makeGalleryWithID(index int) *models.Gallery {
+	const includeScenes = true
+	ret := makeGallery(index, includeScenes)
+	ret.ID = galleryIDs[index]
+
+	if ret.Date != nil && ret.Date.IsZero() {
+		ret.Date = nil
+	}
+
+	ret.Files = models.NewRelatedFiles([]file.File{makeGalleryFile(index)})
+
+	return ret
+}
+
+func Test_galleryQueryBuilder_Find(t *testing.T) {
+	tests := []struct {
+		name    string
+		id      int
+		want    *models.Gallery
+		wantErr bool
+	}{
+		{
+			"valid",
+			galleryIDs[galleryIdxWithImage],
+			makeGalleryWithID(galleryIdxWithImage),
+			false,
+		},
+		{
+			"invalid",
+			invalidID,
+			nil,
+			true,
+		},
+		{
+			"with performers",
+			galleryIDs[galleryIdxWithTwoPerformers],
+			makeGalleryWithID(galleryIdxWithTwoPerformers),
+			false,
+		},
+		{
+			"with tags",
+			galleryIDs[galleryIdxWithTwoTags],
+			makeGalleryWithID(galleryIdxWithTwoTags),
+			false,
+		},
+	}
+
+	qb := db.Gallery
+
+	for _, tt := range tests {
+		runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) {
+			assert := assert.New(t)
+			got, err := qb.Find(ctx, tt.id)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("galleryQueryBuilder.Find() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+
+			if got != nil {
+				// load relationships
+				if err := loadGalleryRelationships(ctx, *tt.want, got); err != nil {
+					t.Errorf("loadGalleryRelationships() error = %v", err)
+					return
+				}
+				clearGalleryFileIDs(got)
+			}
+			assert.Equal(tt.want, got)
+		})
+	}
+}
+
+func postFindGalleries(ctx context.Context, want []*models.Gallery, got []*models.Gallery) error {
+	for i, s := range got {
+		// load relationships
+		if i < len(want) {
+			if err := loadGalleryRelationships(ctx, *want[i], s); err != nil {
+				return err
+			}
+		}
+		clearGalleryFileIDs(s)
+	}
-	assert.Nil(t, galleries)
+	return nil
+}
-	return nil
-	})
+
+func Test_galleryQueryBuilder_FindMany(t *testing.T) {
+	tests := []struct {
+		name    string
+		ids     []int
+		want    []*models.Gallery
+		wantErr bool
+	}{
+		{
+			"valid with relationships",
+			[]int{galleryIDs[galleryIdxWithImage], galleryIDs[galleryIdxWithTwoPerformers], galleryIDs[galleryIdxWithTwoTags]},
+			[]*models.Gallery{
+				makeGalleryWithID(galleryIdxWithImage),
+				makeGalleryWithID(galleryIdxWithTwoPerformers),
+				makeGalleryWithID(galleryIdxWithTwoTags),
+			},
+			false,
+		},
+		{
+			"invalid",
+			[]int{galleryIDs[galleryIdxWithImage], galleryIDs[galleryIdxWithTwoPerformers], invalidID},
+			nil,
+			true,
+		},
+	}
+
+	qb := db.Gallery
+
+	for _, tt := range tests {
+		runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) {
+			assert := assert.New(t)
+			got, err := qb.FindMany(ctx, tt.ids)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("galleryQueryBuilder.FindMany() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+
+			if err := postFindGalleries(ctx, tt.want, got); err != nil {
+				t.Errorf("loadGalleryRelationships() error = %v", err)
+				return
+			}
+
+			assert.Equal(tt.want, got)
+		})
+	}
+}
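Each case above runs inside its own transaction that is rolled back at the end, so no case can leak writes into the shared fixture data. The helper is defined in this package's test setup; its shape is presumably along these lines (withRollbackTxn is a stand-in name, not a function shown in this diff):

// sketch of the per-test transaction pattern used throughout this file
func runWithRollbackTxn(t *testing.T, name string, fn func(t *testing.T, ctx context.Context)) {
	t.Run(name, func(t *testing.T) {
		// begin a txn, run the body, then roll back unconditionally
		_ = withRollbackTxn(context.Background(), func(ctx context.Context) error {
			fn(t, ctx)
			return nil
		})
	})
}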
+
+func Test_galleryQueryBuilder_FindByChecksum(t *testing.T) {
+	getChecksum := func(index int) string {
+		return getGalleryStringValue(index, checksumField)
+	}
+
+	tests := []struct {
+		name     string
+		checksum string
+		want     []*models.Gallery
+		wantErr  bool
+	}{
+		{
+			"valid",
+			getChecksum(galleryIdxWithImage),
+			[]*models.Gallery{makeGalleryWithID(galleryIdxWithImage)},
+			false,
+		},
+		{
+			"invalid",
+			"invalid checksum",
+			nil,
+			false,
+		},
+		{
+			"with performers",
+			getChecksum(galleryIdxWithTwoPerformers),
+			[]*models.Gallery{makeGalleryWithID(galleryIdxWithTwoPerformers)},
+			false,
+		},
+		{
+			"with tags",
+			getChecksum(galleryIdxWithTwoTags),
+			[]*models.Gallery{makeGalleryWithID(galleryIdxWithTwoTags)},
+			false,
+		},
+	}
+
+	qb := db.Gallery
+
+	for _, tt := range tests {
+		runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) {
+			assert := assert.New(t)
+			got, err := qb.FindByChecksum(ctx, tt.checksum)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("galleryQueryBuilder.FindByChecksum() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+
+			if err := postFindGalleries(ctx, tt.want, got); err != nil {
+				t.Errorf("loadGalleryRelationships() error = %v", err)
+				return
+			}
+
+			assert.Equal(tt.want, got)
+		})
+	}
+}
+
+func Test_galleryQueryBuilder_FindByChecksums(t *testing.T) {
+	getChecksum := func(index int) string {
+		return getGalleryStringValue(index, checksumField)
+	}
+
+	tests := []struct {
+		name      string
+		checksums []string
+		want      []*models.Gallery
+		wantErr   bool
+	}{
+		{
+			"valid with relationships",
+			[]string{
+				getChecksum(galleryIdxWithImage),
+				getChecksum(galleryIdxWithTwoPerformers),
+				getChecksum(galleryIdxWithTwoTags),
+			},
+			[]*models.Gallery{
+				makeGalleryWithID(galleryIdxWithImage),
+				makeGalleryWithID(galleryIdxWithTwoPerformers),
+				makeGalleryWithID(galleryIdxWithTwoTags),
+			},
+			false,
+		},
+		{
+			"with invalid",
+			[]string{
+				getChecksum(galleryIdxWithImage),
+				getChecksum(galleryIdxWithTwoPerformers),
+				"invalid checksum",
+				getChecksum(galleryIdxWithTwoTags),
+			},
+			[]*models.Gallery{
+				makeGalleryWithID(galleryIdxWithImage),
+				makeGalleryWithID(galleryIdxWithTwoPerformers),
+				makeGalleryWithID(galleryIdxWithTwoTags),
+			},
+			false,
+		},
+	}
+
+	qb := db.Gallery
+
+	for _, tt := range tests {
+		runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) {
+			assert := assert.New(t)
+			got, err := qb.FindByChecksums(ctx, tt.checksums)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("galleryQueryBuilder.FindByChecksums() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+
+			if err := postFindGalleries(ctx, tt.want, got); err != nil {
+				t.Errorf("loadGalleryRelationships() error = %v", err)
+				return
+			}
+
+			assert.Equal(tt.want, got)
+		})
+	}
+}
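Note that FindByChecksum now returns a slice: with files split into their own tables, a single MD5 fingerprint can be attached to more than one gallery. A caller-side sketch (the fingerprint value is hypothetical):

galleries, err := db.Gallery.FindByChecksum(ctx, "d41d8cd98f00b204e9800998ecf8427e")
if err != nil {
	return err
}
for _, g := range galleries {
	fmt.Println(g.ID, g.Path)
}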
+
+func Test_galleryQueryBuilder_FindByPath(t *testing.T) {
+	getPath := func(index int) string {
+		return getFilePath(folderIdxWithGalleryFiles, getGalleryBasename(index))
+	}
+
+	tests := []struct {
+		name    string
+		path    string
+		want    []*models.Gallery
+		wantErr bool
+	}{
+		{
+			"valid",
+			getPath(galleryIdxWithImage),
+			[]*models.Gallery{makeGalleryWithID(galleryIdxWithImage)},
+			false,
+		},
+		{
+			"invalid",
+			"invalid path",
+			nil,
+			false,
+		},
+		{
+			"with performers",
+			getPath(galleryIdxWithTwoPerformers),
+			[]*models.Gallery{makeGalleryWithID(galleryIdxWithTwoPerformers)},
+			false,
+		},
+		{
+			"with tags",
+			getPath(galleryIdxWithTwoTags),
+			[]*models.Gallery{makeGalleryWithID(galleryIdxWithTwoTags)},
+			false,
+		},
+	}
+
+	qb := db.Gallery
+
+	for _, tt := range tests {
+		runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) {
+			assert := assert.New(t)
+			got, err := qb.FindByPath(ctx, tt.path)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("galleryQueryBuilder.FindByPath() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+
+			if err := postFindGalleries(ctx, tt.want, got); err != nil {
+				t.Errorf("loadGalleryRelationships() error = %v", err)
+				return
+			}
+
+			assert.Equal(tt.want, got)
+		})
+	}
+}
+
+func Test_galleryQueryBuilder_FindBySceneID(t *testing.T) {
+	tests := []struct {
+		name    string
+		sceneID int
+		want    []*models.Gallery
+		wantErr bool
+	}{
+		{
+			"valid",
+			sceneIDs[sceneIdxWithGallery],
+			[]*models.Gallery{makeGalleryWithID(galleryIdxWithScene)},
+			false,
+		},
+		{
+			"none",
+			sceneIDs[sceneIdx1WithPerformer],
+			nil,
+			false,
+		},
+	}
+
+	qb := db.Gallery
+
+	for _, tt := range tests {
+		runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) {
+			assert := assert.New(t)
+			got, err := qb.FindBySceneID(ctx, tt.sceneID)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("galleryQueryBuilder.FindBySceneID() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+
+			if err := postFindGalleries(ctx, tt.want, got); err != nil {
+				t.Errorf("loadGalleryRelationships() error = %v", err)
+				return
+			}
+
+			assert.Equal(tt.want, got)
+		})
+	}
+}
+
+func Test_galleryQueryBuilder_FindByImageID(t *testing.T) {
+	tests := []struct {
+		name    string
+		imageID int
+		want    []*models.Gallery
+		wantErr bool
+	}{
+		{
+			"valid",
+			imageIDs[imageIdxWithTwoGalleries],
+			[]*models.Gallery{
+				makeGalleryWithID(galleryIdx1WithImage),
+				makeGalleryWithID(galleryIdx2WithImage),
+			},
+			false,
+		},
+		{
+			"none",
+			imageIDs[imageIdx1WithPerformer],
+			nil,
+			false,
+		},
+	}
+
+	qb := db.Gallery
+
+	for _, tt := range tests {
+		runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) {
+			assert := assert.New(t)
+			got, err := qb.FindByImageID(ctx, tt.imageID)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("galleryQueryBuilder.FindByImageID() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+
+			if err := postFindGalleries(ctx, tt.want, got); err != nil {
+				t.Errorf("loadGalleryRelationships() error = %v", err)
+				return
+			}
+
+			assert.Equal(tt.want, got)
+		})
+	}
+}
+
+func Test_galleryQueryBuilder_CountByImageID(t *testing.T) {
+	tests := []struct {
+		name    string
+		imageID int
+		want    int
+		wantErr bool
+	}{
+		{
+			"valid",
+			imageIDs[imageIdxWithTwoGalleries],
+			2,
+			false,
+		},
+		{
+			"none",
+			imageIDs[imageIdx1WithPerformer],
+			0,
+			false,
+		},
+	}
+
+	qb := db.Gallery
+
+	for _, tt := range tests {
+		runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) {
+			got, err := qb.CountByImageID(ctx, tt.imageID)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("galleryQueryBuilder.CountByImageID() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if got != tt.want {
+				t.Errorf("galleryQueryBuilder.CountByImageID() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func galleriesToIDs(i []*models.Gallery) []int {
+	var ret []int
+	for _, ii := range i {
+		ret = append(ret, ii.ID)
+	}
+
+	return ret
+}
+
+func Test_galleryStore_FindByFileID(t *testing.T) {
+	tests := []struct {
+		name    string
+		fileID  file.ID
+		include []int
+		exclude []int
+	}{
+		{
+			"valid",
+			galleryFileIDs[galleryIdx1WithImage],
+			[]int{galleryIdx1WithImage},
+			nil,
+		},
+		{
+			"invalid",
+			invalidFileID,
+			nil,
+			[]int{galleryIdx1WithImage},
+		},
+	}
+
+	qb := db.Gallery
+
+	for _, tt := range tests {
+		runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) {
+			assert := assert.New(t)
+			got, err := qb.FindByFileID(ctx, tt.fileID)
+			if err != nil {
+				t.Errorf("GalleryStore.FindByFileID() error = %v", err)
+				return
+			}
+			for _, f := range got {
+				clearGalleryFileIDs(f)
+			}
+
+			ids := galleriesToIDs(got)
+			include := indexesToIDs(galleryIDs, tt.include)
+			exclude := indexesToIDs(galleryIDs, tt.exclude)
+
+			for _, i := range include {
+				assert.Contains(ids, i)
+			}
+			for _, e := range exclude {
+				assert.NotContains(ids, e)
+			}
+		})
+	}
+}
+
+func Test_galleryStore_FindByFolderID(t *testing.T) {
+	tests := []struct {
+		name     string
+		folderID file.FolderID
+		include  []int
+		exclude  []int
+	}{
+		// TODO - add folder gallery
+		{
+			"invalid",
+			invalidFolderID,
+			nil,
+			[]int{galleryIdxWithImage},
+		},
+	}
+
+	qb := db.Gallery
+
+	for _, tt := range tests {
+		runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) {
+			assert := assert.New(t)
+			got, err := qb.FindByFolderID(ctx, tt.folderID)
+			if err != nil {
+				t.Errorf("GalleryStore.FindByFolderID() error = %v", err)
+				return
+			}
+			for _, f := range got {
+				clearGalleryFileIDs(f)
+			}
+
+			ids := galleriesToIDs(got)
+			include := indexesToIDs(galleryIDs, tt.include)
+			exclude := indexesToIDs(galleryIDs, tt.exclude)
+
+			for _, i := range include {
+				assert.Contains(ids, i)
+			}
+			for _, e := range exclude {
+				assert.NotContains(ids, e)
+			}
+		})
+	}
 }
 
 func TestGalleryQueryQ(t *testing.T) {
-	withTxn(func(r models.Repository) error {
+	withTxn(func(ctx context.Context) error {
 		const galleryIdx = 0
 
 		q := getGalleryStringValue(galleryIdx, pathField)
-
-		sqb := r.Gallery()
-
-		galleryQueryQ(t, sqb, q, galleryIdx)
+		galleryQueryQ(ctx, t, q, galleryIdx)
 
 		return nil
 	})
 }
 
-func galleryQueryQ(t *testing.T, qb models.GalleryReader, q string, expectedGalleryIdx int) {
+func galleryQueryQ(ctx context.Context, t *testing.T, q string, expectedGalleryIdx int) {
+	qb := db.Gallery
+
 	filter := models.FindFilterType{
 		Q: &q,
 	}
 
-	galleries, _, err := qb.Query(nil, &filter)
+	galleries, _, err := qb.Query(ctx, nil, &filter)
 	if err != nil {
 		t.Errorf("Error querying gallery: %s", err.Error())
+		return
 	}
 
 	assert.Len(t, galleries, 1)
@@ -146,7 +1404,7 @@ func galleryQueryQ(t *testing.T, qb models.GalleryReader, q string, expectedGall
 	// no Q should return all results
 	filter.Q = nil
 
-	galleries, _, err = qb.Query(nil, &filter)
+	galleries, _, err = qb.Query(ctx, nil, &filter)
 	if err != nil {
 		t.Errorf("Error querying gallery: %s", err.Error())
 	}
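galleryQueryQ above drives the generic free-text Q filter; a caller uses the same find filter directly (the query string here is hypothetical):

q := "some title fragment"
galleries, total, err := db.Gallery.Query(ctx, nil, &models.FindFilterType{Q: &q})
// total is the unpaged match count; galleries is the returned page
_ = total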
@@ -155,43 +1413,90 @@ func galleryQueryQ(t *testing.T, qb models.GalleryReader, q string, expectedGall
 }
 
 func TestGalleryQueryPath(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		const galleryIdx = 1
-		galleryPath := getGalleryStringValue(galleryIdx, "Path")
+	const galleryIdx = 1
+	galleryPath := getFilePath(folderIdxWithGalleryFiles, getGalleryBasename(galleryIdx))
 
-		pathCriterion := models.StringCriterionInput{
-			Value:    galleryPath,
-			Modifier: models.CriterionModifierEquals,
-		}
+	tests := []struct {
+		name  string
+		input models.StringCriterionInput
+	}{
+		{
+			"equals",
+			models.StringCriterionInput{
+				Value:    galleryPath,
+				Modifier: models.CriterionModifierEquals,
+			},
+		},
+		{
+			"not equals",
+			models.StringCriterionInput{
+				Value:    galleryPath,
+				Modifier: models.CriterionModifierNotEquals,
+			},
+		},
+		{
+			"matches regex",
+			models.StringCriterionInput{
+				Value:    "gallery.*1_Path",
+				Modifier: models.CriterionModifierMatchesRegex,
+			},
+		},
+		{
+			"not matches regex",
+			models.StringCriterionInput{
+				Value:    "gallery.*1_Path",
+				Modifier: models.CriterionModifierNotMatchesRegex,
+			},
+		},
+		{
+			"is null",
+			models.StringCriterionInput{
+				Modifier: models.CriterionModifierIsNull,
+			},
+		},
+		{
+			"not null",
+			models.StringCriterionInput{
+				Modifier: models.CriterionModifierNotNull,
+			},
+		},
+	}
 
-		verifyGalleriesPath(t, r.Gallery(), pathCriterion)
+	qb := db.Gallery
 
-		pathCriterion.Modifier = models.CriterionModifierNotEquals
-		verifyGalleriesPath(t, r.Gallery(), pathCriterion)
+	for _, tt := range tests {
+		runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) {
+			got, count, err := qb.Query(ctx, &models.GalleryFilterType{
+				Path: &tt.input,
+			}, nil)
 
-		pathCriterion.Modifier = models.CriterionModifierMatchesRegex
-		pathCriterion.Value = "gallery.*1_Path"
-		verifyGalleriesPath(t, r.Gallery(), pathCriterion)
+			if err != nil {
+				t.Errorf("GalleryStore.TestGalleryQueryPath() error = %v", err)
+				return
+			}
 
-		pathCriterion.Modifier = models.CriterionModifierNotMatchesRegex
-		verifyGalleriesPath(t, r.Gallery(), pathCriterion)
+			assert.NotEqual(t, 0, count)
 
-		return nil
-	})
+			for _, gallery := range got {
+				verifyString(t, gallery.Path, tt.input)
+			}
+		})
+	}
 }
 
-func verifyGalleriesPath(t *testing.T, sqb models.GalleryReader, pathCriterion models.StringCriterionInput) {
+func verifyGalleriesPath(ctx context.Context, t *testing.T, pathCriterion models.StringCriterionInput) {
 	galleryFilter := models.GalleryFilterType{
 		Path: &pathCriterion,
 	}
-	galleries, _, err := sqb.Query(&galleryFilter, nil)
+	sqb := db.Gallery
+	galleries, _, err := sqb.Query(ctx, &galleryFilter, nil)
 	if err != nil {
 		t.Errorf("Error querying gallery: %s", err.Error())
 	}
 
 	for _, gallery := range galleries {
-		verifyNullString(t, gallery.Path, pathCriterion)
+		verifyString(t, gallery.Path, pathCriterion)
 	}
 }
 
@@ -199,8 +1504,8 @@ func TestGalleryQueryPathOr(t *testing.T) {
 	const gallery1Idx = 1
 	const gallery2Idx = 2
 
-	gallery1Path := getGalleryStringValue(gallery1Idx, "Path")
-	gallery2Path := getGalleryStringValue(gallery2Idx, "Path")
+	gallery1Path := getFilePath(folderIdxWithGalleryFiles, getGalleryBasename(gallery1Idx))
+	gallery2Path := getFilePath(folderIdxWithGalleryFiles, getGalleryBasename(gallery2Idx))
 
 	galleryFilter := models.GalleryFilterType{
 		Path: &models.StringCriterionInput{
@@ -215,14 +1520,17 @@ func TestGalleryQueryPathOr(t *testing.T) {
 		},
 	}
 
-	withTxn(func(r models.Repository) error {
-		sqb := r.Gallery()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Gallery
 
-		galleries := queryGallery(t, sqb, &galleryFilter, nil)
+		galleries := queryGallery(ctx, t, sqb, &galleryFilter, nil)
 
-		assert.Len(t, galleries, 2)
-		assert.Equal(t, gallery1Path, galleries[0].Path.String)
-		assert.Equal(t, gallery2Path, galleries[1].Path.String)
+		if !assert.Len(t, galleries, 2) {
+			return nil
+		}
+
+		assert.Equal(t, gallery1Path, galleries[0].Path)
+		assert.Equal(t, gallery2Path, galleries[1].Path)
 
 		return nil
 	})
@@ -230,8 +1538,8 @@ func TestGalleryQueryPathAndRating(t *testing.T) {
 	const galleryIdx = 1
-	galleryPath := getGalleryStringValue(galleryIdx, "Path")
-	galleryRating := getRating(galleryIdx)
+	galleryPath := getFilePath(folderIdxWithGalleryFiles, getGalleryBasename(galleryIdx))
+	galleryRating := getIntPtr(getRating(galleryIdx))
 
 	galleryFilter := models.GalleryFilterType{
 		Path: &models.StringCriterionInput{
@@ -240,20 +1548,23 @@ func TestGalleryQueryPathAndRating(t *testing.T) {
 		},
 		And: &models.GalleryFilterType{
 			Rating: &models.IntCriterionInput{
-				Value:    int(galleryRating.Int64),
+				Value:    *galleryRating,
 				Modifier: models.CriterionModifierEquals,
 			},
 		},
 	}
 
-	withTxn(func(r models.Repository) error {
-		sqb := r.Gallery()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Gallery
 
-		galleries := queryGallery(t, sqb, &galleryFilter, nil)
+		galleries := queryGallery(ctx, t, sqb, &galleryFilter, nil)
-		assert.Len(t, galleries, 1)
-		assert.Equal(t, galleryPath, galleries[0].Path.String)
-		assert.Equal(t, galleryRating.Int64, galleries[0].Rating.Int64)
+		if !assert.Len(t, galleries, 1) {
+			return nil
+		}
+
+		assert.Equal(t, galleryPath, galleries[0].Path)
+		assert.Equal(t, *galleryRating, *galleries[0].Rating)
 
 		return nil
 	})
@@ -281,15 +1592,15 @@ func TestGalleryQueryPathNotRating(t *testing.T) {
 		},
 	}
 
-	withTxn(func(r models.Repository) error {
-		sqb := r.Gallery()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Gallery
 
-		galleries := queryGallery(t, sqb, &galleryFilter, nil)
+		galleries := queryGallery(ctx, t, sqb, &galleryFilter, nil)
 
 		for _, gallery := range galleries {
-			verifyNullString(t, gallery.Path, pathCriterion)
+			verifyString(t, gallery.Path, pathCriterion)
 			ratingCriterion.Modifier = models.CriterionModifierNotEquals
-			verifyInt64(t, gallery.Rating, ratingCriterion)
+			verifyIntPtr(t, gallery.Rating, ratingCriterion)
 		}
 
 		return nil
@@ -312,20 +1623,20 @@ func TestGalleryIllegalQuery(t *testing.T) {
 		Or: &subFilter,
 	}
 
-	withTxn(func(r models.Repository) error {
-		sqb := r.Gallery()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Gallery
 
-		_, _, err := sqb.Query(galleryFilter, nil)
+		_, _, err := sqb.Query(ctx, galleryFilter, nil)
 		assert.NotNil(err)
 
 		galleryFilter.Or = nil
 		galleryFilter.Not = &subFilter
-		_, _, err = sqb.Query(galleryFilter, nil)
+		_, _, err = sqb.Query(ctx, galleryFilter, nil)
 		assert.NotNil(err)
 
 		galleryFilter.And = nil
 		galleryFilter.Or = &subFilter
-		_, _, err = sqb.Query(galleryFilter, nil)
+		_, _, err = sqb.Query(ctx, galleryFilter, nil)
 		assert.NotNil(err)
 
 		return nil
@@ -347,7 +1658,7 @@ func TestGalleryQueryURL(t *testing.T) {
 	verifyFn := func(g *models.Gallery) {
 		t.Helper()
-		verifyNullString(t, g.URL, urlCriterion)
+		verifyString(t, g.URL, urlCriterion)
 	}
 
 	verifyGalleryQuery(t, filter, verifyFn)
@@ -371,11 +1682,11 @@ func TestGalleryQueryURL(t *testing.T) {
 }
 
 func verifyGalleryQuery(t *testing.T, filter models.GalleryFilterType, verifyFn func(s *models.Gallery)) {
-	withTxn(func(r models.Repository) error {
+	withTxn(func(ctx context.Context) error {
 		t.Helper()
-		sqb := r.Gallery()
+		sqb := db.Gallery
 
-		galleries := queryGallery(t, sqb, &filter, nil)
+		galleries := queryGallery(ctx, t, sqb, &filter, nil)
 
 		// assume it should find at least one
 		assert.Greater(t, len(galleries), 0)
@@ -414,19 +1725,19 @@ func TestGalleryQueryRating(t *testing.T) {
 }
 
 func verifyGalleriesRating(t *testing.T, ratingCriterion models.IntCriterionInput) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Gallery()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Gallery
 		galleryFilter := models.GalleryFilterType{
 			Rating: &ratingCriterion,
 		}
 
-		galleries, _, err := sqb.Query(&galleryFilter, nil)
+		galleries, _, err := sqb.Query(ctx, &galleryFilter, nil)
 		if err != nil {
 			t.Errorf("Error querying gallery: %s", err.Error())
 		}
 
 		for _, gallery := range galleries {
-			verifyInt64(t, gallery.Rating, ratingCriterion)
+			verifyIntPtr(t, gallery.Rating, ratingCriterion)
 		}
 
 		return nil
@@ -434,8 +1745,8 @@ func verifyGalleriesRating(t *testing.T, ratingCriterion models.IntCriterionInpu
 }
 
 func TestGalleryQueryIsMissingScene(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		qb := r.Gallery()
+	withTxn(func(ctx context.Context) error {
+		qb := db.Gallery
 		isMissing := "scenes"
 		galleryFilter := models.GalleryFilterType{
 			IsMissing: &isMissing,
@@ -446,7 +1757,7 @@ func TestGalleryQueryIsMissingScene(t *testing.T) {
 			Q: &q,
 		}
 
-		galleries, _, err := qb.Query(&galleryFilter, &findFilter)
+		galleries, _, err := qb.Query(ctx, &galleryFilter, &findFilter)
 		if err != nil {
 			t.Errorf("Error querying gallery: %s", err.Error())
 		}
@@ -454,7 +1765,7 @@ func TestGalleryQueryIsMissingScene(t *testing.T) {
 		assert.Len(t, galleries, 0)
 
 		findFilter.Q = nil
-		galleries, _, err = qb.Query(&galleryFilter, &findFilter)
+		galleries, _, err = qb.Query(ctx, &galleryFilter, &findFilter)
 		if err != nil {
 			t.Errorf("Error querying gallery: %s", err.Error())
 		}
@@ -468,8 +1779,8 @@ func TestGalleryQueryIsMissingScene(t *testing.T) {
 	})
 }
 
-func queryGallery(t *testing.T, sqb models.GalleryReader, galleryFilter *models.GalleryFilterType, findFilter *models.FindFilterType) []*models.Gallery {
-	galleries, _, err := sqb.Query(galleryFilter, findFilter)
+func queryGallery(ctx context.Context, t *testing.T, sqb models.GalleryReader, galleryFilter *models.GalleryFilterType, findFilter *models.FindFilterType) []*models.Gallery {
+	galleries, _, err := sqb.Query(ctx, galleryFilter, findFilter)
 	if err != nil {
 		t.Errorf("Error querying gallery: %s", err.Error())
 	}
@@ -478,8 +1789,8 @@ func queryGallery(t *testing.T, sqb models.GalleryReader, galleryFilter *models.
 }
 
 func TestGalleryQueryIsMissingStudio(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Gallery()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Gallery
 		isMissing := "studio"
 		galleryFilter := models.GalleryFilterType{
 			IsMissing: &isMissing,
@@ -490,12 +1801,12 @@ func TestGalleryQueryIsMissingStudio(t *testing.T) {
 			Q: &q,
 		}
 
-		galleries := queryGallery(t, sqb, &galleryFilter, &findFilter)
+		galleries := queryGallery(ctx, t, sqb, &galleryFilter, &findFilter)
 
 		assert.Len(t, galleries, 0)
 
 		findFilter.Q = nil
-		galleries = queryGallery(t, sqb, &galleryFilter, &findFilter)
+		galleries = queryGallery(ctx, t, sqb, &galleryFilter, &findFilter)
 
 		// ensure none of the ids equal the one with studio
 		for _, gallery := range galleries {
@@ -507,8 +1818,8 @@ func TestGalleryQueryIsMissingStudio(t *testing.T) {
 }
 
 func TestGalleryQueryIsMissingPerformers(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Gallery()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Gallery
 		isMissing := "performers"
 		galleryFilter := models.GalleryFilterType{
 			IsMissing: &isMissing,
@@ -519,12 +1830,12 @@ func TestGalleryQueryIsMissingPerformers(t *testing.T) {
 			Q: &q,
 		}
 
-		galleries := queryGallery(t, sqb, &galleryFilter, &findFilter)
+		galleries := queryGallery(ctx, t, sqb, &galleryFilter, &findFilter)
 
 		assert.Len(t, galleries, 0)
 
 		findFilter.Q = nil
-		galleries = queryGallery(t, sqb, &galleryFilter, &findFilter)
+		galleries = queryGallery(ctx, t, sqb, &galleryFilter, &findFilter)
 
 		assert.True(t, len(galleries) > 0)
 
@@ -538,8 +1849,8 @@ func TestGalleryQueryIsMissingPerformers(t *testing.T) {
 }
 
 func TestGalleryQueryIsMissingTags(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Gallery()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Gallery
 		isMissing := "tags"
 		galleryFilter := models.GalleryFilterType{
 			IsMissing: &isMissing,
@@ -550,12 +1861,12 @@ func TestGalleryQueryIsMissingTags(t *testing.T) {
 			Q: &q,
 		}
 
-		galleries := queryGallery(t, sqb, &galleryFilter, &findFilter)
+		galleries := queryGallery(ctx, t, sqb, &galleryFilter, &findFilter)
 
 		assert.Len(t, galleries, 0)
 
 		findFilter.Q = nil
-		galleries = queryGallery(t, sqb, &galleryFilter, &findFilter)
+		galleries = queryGallery(ctx, t, sqb, &galleryFilter, &findFilter)
 
 		assert.True(t, len(galleries) > 0)
 
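The IsMissing tests above all follow one pattern; a minimal caller-side sketch using one of the values the tests exercise ("scenes", "studio", "performers", "tags", "date"):

isMissing := "performers"
galleries, _, err := db.Gallery.Query(ctx, &models.GalleryFilterType{
	IsMissing: &isMissing,
}, nil)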
@@ -564,21 +1875,21 @@ func TestGalleryQueryIsMissingTags(t *testing.T) {
 }
 
 func TestGalleryQueryIsMissingDate(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Gallery()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Gallery
 		isMissing := "date"
 		galleryFilter := models.GalleryFilterType{
 			IsMissing: &isMissing,
 		}
 
-		galleries := queryGallery(t, sqb, &galleryFilter, nil)
+		galleries := queryGallery(ctx, t, sqb, &galleryFilter, nil)
 
 		// three in four galleries have no date
 		assert.Len(t, galleries, int(math.Ceil(float64(totalGalleries)/4*3)))
 
 		// ensure date is null, empty or "0001-01-01"
 		for _, g := range galleries {
-			assert.True(t, !g.Date.Valid || g.Date.String == "" || g.Date.String == "0001-01-01")
+			assert.True(t, g.Date == nil || g.Date.Time == time.Time{})
 		}
 
 		return nil
@@ -586,8 +1897,8 @@ func TestGalleryQueryIsMissingDate(t *testing.T) {
 }
 
 func TestGalleryQueryPerformers(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Gallery()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Gallery
 		performerCriterion := models.MultiCriterionInput{
 			Value: []string{
 				strconv.Itoa(performerIDs[performerIdxWithGallery]),
@@ -600,7 +1911,7 @@ func TestGalleryQueryPerformers(t *testing.T) {
 			Performers: &performerCriterion,
 		}
 
-		galleries := queryGallery(t, sqb, &galleryFilter, nil)
+		galleries := queryGallery(ctx, t, sqb, &galleryFilter, nil)
 
 		assert.Len(t, galleries, 2)
 
@@ -617,7 +1928,7 @@ func TestGalleryQueryPerformers(t *testing.T) {
 			Modifier: models.CriterionModifierIncludesAll,
 		}
 
-		galleries = queryGallery(t, sqb, &galleryFilter, nil)
+		galleries = queryGallery(ctx, t, sqb, &galleryFilter, nil)
 
 		assert.Len(t, galleries, 1)
 		assert.Equal(t, galleryIDs[galleryIdxWithTwoPerformers], galleries[0].ID)
@@ -634,7 +1945,7 @@ func TestGalleryQueryPerformers(t *testing.T) {
 			Q: &q,
 		}
 
-		galleries = queryGallery(t, sqb, &galleryFilter, &findFilter)
+		galleries = queryGallery(ctx, t, sqb, &galleryFilter, &findFilter)
 		assert.Len(t, galleries, 0)
 
 		return nil
@@ -642,8 +1953,8 @@ func TestGalleryQueryPerformers(t *testing.T) {
 }
 
 func TestGalleryQueryTags(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Gallery()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Gallery
 		tagCriterion := models.HierarchicalMultiCriterionInput{
 			Value: []string{
 				strconv.Itoa(tagIDs[tagIdxWithGallery]),
@@ -656,7 +1967,7 @@ func TestGalleryQueryTags(t *testing.T) {
 			Tags: &tagCriterion,
 		}
 
-		galleries := queryGallery(t, sqb, &galleryFilter, nil)
+		galleries := queryGallery(ctx, t, sqb, &galleryFilter, nil)
 		assert.Len(t, galleries, 2)
 
 		// ensure ids are correct
@@ -672,7 +1983,7 @@ func TestGalleryQueryTags(t *testing.T) {
 			Modifier: models.CriterionModifierIncludesAll,
 		}
 
-		galleries = queryGallery(t, sqb, &galleryFilter, nil)
+		galleries = queryGallery(ctx, t, sqb, &galleryFilter, nil)
 
 		assert.Len(t, galleries, 1)
 		assert.Equal(t, galleryIDs[galleryIdxWithTwoTags], galleries[0].ID)
@@ -689,7 +2000,7 @@ func TestGalleryQueryTags(t *testing.T) {
 			Q: &q,
 		}
 
-		galleries = queryGallery(t, sqb, &galleryFilter, &findFilter)
+		galleries = queryGallery(ctx, t, sqb, &galleryFilter, &findFilter)
 		assert.Len(t, galleries, 0)
 
 		return nil
@@ -697,8 +2008,8 @@ func TestGalleryQueryTags(t *testing.T) {
 }
 
 func TestGalleryQueryStudio(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Gallery()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Gallery
 		studioCriterion := models.HierarchicalMultiCriterionInput{
 			Value: []string{
 				strconv.Itoa(studioIDs[studioIdxWithGallery]),
@@ -710,7 +2021,7 @@ func TestGalleryQueryStudio(t *testing.T) {
 			Studios: &studioCriterion,
 		}
 
-		galleries := queryGallery(t, sqb, &galleryFilter, nil)
+		galleries := queryGallery(ctx, t, sqb, &galleryFilter, nil)
 
 		assert.Len(t, galleries, 1)
 
@@ -729,7 +2040,7 @@ func TestGalleryQueryStudio(t *testing.T) {
 			Q: &q,
 		}
 
-		galleries = queryGallery(t, sqb, &galleryFilter, &findFilter)
+		galleries = queryGallery(ctx, t, sqb, &galleryFilter, &findFilter)
 		assert.Len(t, galleries, 0)
 
 		return nil
@@ -737,8 +2048,8 @@ func TestGalleryQueryStudio(t *testing.T) {
 }
 
 func TestGalleryQueryStudioDepth(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Gallery()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Gallery
 		depth := 2
 		studioCriterion := models.HierarchicalMultiCriterionInput{
 			Value: []string{
@@ -752,16 +2063,16 @@ func TestGalleryQueryStudioDepth(t *testing.T) {
 			Studios: &studioCriterion,
 		}
 
-		galleries := queryGallery(t, sqb, &galleryFilter, nil)
+		galleries := queryGallery(ctx, t, sqb, &galleryFilter, nil)
 		assert.Len(t, galleries, 1)
 
 		depth = 1
-		galleries = queryGallery(t, sqb, &galleryFilter, nil)
+		galleries = queryGallery(ctx, t, sqb, &galleryFilter, nil)
 		assert.Len(t, galleries, 0)
 
 		studioCriterion.Value = []string{strconv.Itoa(studioIDs[studioIdxWithParentAndChild])}
-		galleries = queryGallery(t, sqb, &galleryFilter, nil)
+		galleries = queryGallery(ctx, t, sqb, &galleryFilter, nil)
 		assert.Len(t, galleries, 1)
 
 		// ensure id is correct
@@ -782,15 +2093,15 @@ func TestGalleryQueryStudioDepth(t *testing.T) {
 			Q: &q,
 		}
 
-		galleries = queryGallery(t, sqb, &galleryFilter, &findFilter)
+		galleries = queryGallery(ctx, t, sqb, &galleryFilter, &findFilter)
 		assert.Len(t, galleries, 0)
 
 		depth = 1
-		galleries = queryGallery(t, sqb, &galleryFilter, &findFilter)
+		galleries = queryGallery(ctx, t, sqb, &galleryFilter, &findFilter)
 		assert.Len(t, galleries, 1)
 
 		studioCriterion.Value = []string{strconv.Itoa(studioIDs[studioIdxWithParentAndChild])}
-		galleries = queryGallery(t, sqb, &galleryFilter, &findFilter)
+		galleries = queryGallery(ctx, t, sqb, &galleryFilter, &findFilter)
 		assert.Len(t, galleries, 0)
 
 		return nil
@@ -798,8 +2109,8 @@ func TestGalleryQueryStudioDepth(t *testing.T) {
 }
 
 func TestGalleryQueryPerformerTags(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Gallery()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Gallery
 		tagCriterion := models.HierarchicalMultiCriterionInput{
 			Value: []string{
 				strconv.Itoa(tagIDs[tagIdxWithPerformer]),
@@ -812,7 +2123,7 @@ func TestGalleryQueryPerformerTags(t *testing.T) {
 			PerformerTags: &tagCriterion,
 		}
 
-		galleries := queryGallery(t, sqb, &galleryFilter, nil)
+		galleries := queryGallery(ctx, t, sqb, &galleryFilter, nil)
 		assert.Len(t, galleries, 2)
 
 		// ensure ids are correct
@@ -828,7 +2139,7 @@ func TestGalleryQueryPerformerTags(t *testing.T) {
 			Modifier: models.CriterionModifierIncludesAll,
 		}
 
-		galleries = queryGallery(t, sqb, &galleryFilter, nil)
+		galleries = queryGallery(ctx, t, sqb, &galleryFilter, nil)
 
 		assert.Len(t, galleries, 1)
 		assert.Equal(t, galleryIDs[galleryIdxWithPerformerTwoTags], galleries[0].ID)
@@ -845,7 +2156,7 @@ func TestGalleryQueryPerformerTags(t *testing.T) {
 			Q: &q,
 		}
 
-		galleries = queryGallery(t, sqb, &galleryFilter, &findFilter)
+		galleries = queryGallery(ctx, t, sqb, &galleryFilter, &findFilter)
 		assert.Len(t, galleries, 0)
 
 		tagCriterion = models.HierarchicalMultiCriterionInput{
+			Modifier: models.CriterionModifierIsNull,
@@ -853,22 +2164,22 @@ func TestGalleryQueryPerformerTags(t *testing.T) {
 		}
 
 		q = getGalleryStringValue(galleryIdx1WithImage, titleField)
-		galleries = queryGallery(t, sqb, &galleryFilter, &findFilter)
+		galleries = queryGallery(ctx, t, sqb, &galleryFilter, &findFilter)
 		assert.Len(t, galleries, 1)
 		assert.Equal(t, galleryIDs[galleryIdx1WithImage], galleries[0].ID)
 
 		q = getGalleryStringValue(galleryIdxWithPerformerTag, titleField)
-		galleries = queryGallery(t, sqb, &galleryFilter, &findFilter)
+		galleries = queryGallery(ctx, t, sqb, &galleryFilter, &findFilter)
 		assert.Len(t, galleries, 0)
 
 		tagCriterion.Modifier = models.CriterionModifierNotNull
-		galleries = queryGallery(t, sqb, &galleryFilter, &findFilter)
+		galleries = queryGallery(ctx, t, sqb, &galleryFilter, &findFilter)
 		assert.Len(t, galleries, 1)
 		assert.Equal(t, galleryIDs[galleryIdxWithPerformerTag], galleries[0].ID)
 
 		q = getGalleryStringValue(galleryIdx1WithImage, titleField)
-		galleries = queryGallery(t, sqb, &galleryFilter, &findFilter)
+		galleries = queryGallery(ctx, t, sqb, &galleryFilter, &findFilter)
 		assert.Len(t, galleries, 0)
 
 		return nil
@@ -895,21 +2206,21 @@ func TestGalleryQueryTagCount(t *testing.T) {
 }
 
 func verifyGalleriesTagCount(t *testing.T, tagCountCriterion models.IntCriterionInput) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Gallery()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Gallery
 		galleryFilter := models.GalleryFilterType{
 			TagCount: &tagCountCriterion,
 		}
 
-		galleries := queryGallery(t, sqb, &galleryFilter, nil)
+		galleries := queryGallery(ctx, t, sqb, &galleryFilter, nil)
 		assert.Greater(t, len(galleries), 0)
 
 		for _, gallery := range galleries {
-			ids, err := sqb.GetTagIDs(gallery.ID)
-			if err != nil {
-				return err
+			if err := gallery.LoadTagIDs(ctx, sqb); err != nil {
+				t.Errorf("gallery.LoadTagIDs() error = %v", err)
+				return nil
 			}
-			verifyInt(t, len(ids), tagCountCriterion)
+			verifyInt(t, len(gallery.TagIDs.List()), tagCountCriterion)
 		}
 
 		return nil
@@ -936,21 +2247,22 @@ func TestGalleryQueryPerformerCount(t *testing.T) {
 }
 
 func verifyGalleriesPerformerCount(t *testing.T, performerCountCriterion models.IntCriterionInput) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Gallery()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Gallery
 		galleryFilter := models.GalleryFilterType{
 			PerformerCount: &performerCountCriterion,
 		}
 
-		galleries := queryGallery(t, sqb, &galleryFilter, nil)
+		galleries := queryGallery(ctx, t, sqb, &galleryFilter, nil)
 		assert.Greater(t, len(galleries), 0)
 
 		for _, gallery := range galleries {
-			ids, err := sqb.GetPerformerIDs(gallery.ID)
-			if err != nil {
-				return err
+			if err := gallery.LoadPerformerIDs(ctx, sqb); err != nil {
+				t.Errorf("gallery.LoadPerformerIDs() error = %v", err)
+				return nil
 			}
-			verifyInt(t, len(ids), performerCountCriterion)
+
+			verifyInt(t, len(gallery.PerformerIDs.List()), performerCountCriterion)
 		}
 
 		return nil
@@ -958,8 +2270,8 @@ func verifyGalleriesPerformerCount(t *testing.T, performerCountCriterion models.
 }
 
 func TestGalleryQueryAverageResolution(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		qb := r.Gallery()
+	withTxn(func(ctx context.Context) error {
+		qb := db.Gallery
 		resolution := models.ResolutionEnumLow
 		galleryFilter := models.GalleryFilterType{
 			AverageResolution: &models.ResolutionCriterionInput{
@@ -969,7 +2281,7 @@ func TestGalleryQueryAverageResolution(t *testing.T) {
 		}
 
 		// not verifying average - just ensure we get at least one
-		galleries := queryGallery(t, qb, &galleryFilter, nil)
+		galleries := queryGallery(ctx, t, qb, &galleryFilter, nil)
 		assert.Greater(t, len(galleries), 0)
 
 		return nil
@@ -996,19 +2308,19 @@ func TestGalleryQueryImageCount(t *testing.T) {
 }
 
 func verifyGalleriesImageCount(t *testing.T, imageCountCriterion models.IntCriterionInput) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Gallery()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Gallery
 		galleryFilter := models.GalleryFilterType{
 			ImageCount: &imageCountCriterion,
 		}
 
-		galleries := queryGallery(t, sqb, &galleryFilter, nil)
+		galleries := queryGallery(ctx, t, sqb, &galleryFilter, nil)
 		assert.Greater(t, len(galleries), -1)
 
 		for _, gallery := range galleries {
 			pp := 0
 
-			result, err := r.Image().Query(models.ImageQueryOptions{
+			result, err := db.Image.Query(ctx, models.ImageQueryOptions{
 				QueryOptions: models.QueryOptions{
 					FindFilter: &models.FindFilterType{
 						PerPage: &pp,
@@ -1032,8 +2344,66 @@ func verifyGalleriesImageCount(t *testing.T, imageCountCriterion models.IntCrite
 	})
 }
 
+func TestGalleryQuerySorting(t *testing.T) {
+	tests := []struct {
+		name            string
+		sortBy          string
+		dir             models.SortDirectionEnum
+		firstGalleryIdx int // -1 to ignore
+		lastGalleryIdx  int
+	}{
+		{
+			"file mod time",
+			"file_mod_time",
+			models.SortDirectionEnumDesc,
+			-1,
+			-1,
+		},
+		{
+			"path",
+			"path",
+			models.SortDirectionEnumDesc,
+			-1,
+			-1,
+		},
+	}
+
+	qb := db.Gallery
+
+	for _, tt := range tests {
+		runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) {
+			assert := assert.New(t)
+			got, _, err := qb.Query(ctx, nil, &models.FindFilterType{
+				Sort:      &tt.sortBy,
+				Direction: &tt.dir,
+			})
+
+			if err != nil {
+				t.Errorf("GalleryStore.TestGalleryQuerySorting() error = %v", err)
+				return
+			}
+
+			if !assert.Greater(len(got), 0) {
+				return
+			}
+
+			// galleries should be in same order as indexes
+			firstGallery := got[0]
+			lastGallery := got[len(got)-1]
+
+			if tt.firstGalleryIdx != -1 {
+				firstID := galleryIDs[tt.firstGalleryIdx]
+				assert.Equal(firstID, firstGallery.ID)
+			}
+			if tt.lastGalleryIdx != -1 {
+				lastID := galleryIDs[tt.lastGalleryIdx]
+				assert.Equal(lastID, lastGallery.ID)
+			}
+		})
+	}
+}
+
 // TODO Count
 // TODO All
 // TODO Query
-// TODO Update
 // TODO Destroy
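TestGalleryQuerySorting above only asserts that sorted queries execute; a caller requests sorting through the same find filter (field values taken from the test):

sort := "path"
dir := models.SortDirectionEnumDesc
galleries, _, err := db.Gallery.Query(ctx, nil, &models.FindFilterType{
	Sort:      &sort,
	Direction: &dir,
})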
diff --git a/pkg/sqlite/hooks.go b/pkg/sqlite/hooks.go
new file mode 100644
index 000000000..468bbbdf9
--- /dev/null
+++ b/pkg/sqlite/hooks.go
@@ -0,0 +1,50 @@
+package sqlite
+
+import (
+	"context"
+
+	"github.com/stashapp/stash/pkg/txn"
+)
+
+type hookManager struct {
+	postCommitHooks   []txn.TxnFunc
+	postRollbackHooks []txn.TxnFunc
+}
+
+func (m *hookManager) register(ctx context.Context) context.Context {
+	return context.WithValue(ctx, hookManagerKey, m)
+}
+
+func (db *Database) hookManager(ctx context.Context) *hookManager {
+	m, ok := ctx.Value(hookManagerKey).(*hookManager)
+	if !ok {
+		return nil
+	}
+	return m
+}
+
+func (db *Database) executePostCommitHooks(ctx context.Context) {
+	m := db.hookManager(ctx)
+	for _, h := range m.postCommitHooks {
+		// ignore errors
+		_ = h(ctx)
+	}
+}
+
+func (db *Database) executePostRollbackHooks(ctx context.Context) {
+	m := db.hookManager(ctx)
+	for _, h := range m.postRollbackHooks {
+		// ignore errors
+		_ = h(ctx)
+	}
+}
+
+func (db *Database) AddPostCommitHook(ctx context.Context, hook txn.TxnFunc) {
+	m := db.hookManager(ctx)
+	m.postCommitHooks = append(m.postCommitHooks, hook)
+}
+
+func (db *Database) AddPostRollbackHook(ctx context.Context, hook txn.TxnFunc) {
+	m := db.hookManager(ctx)
+	m.postRollbackHooks = append(m.postRollbackHooks, hook)
+}
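The hook manager above carries callbacks on the context, so hooks registered anywhere inside a transaction fire only after the final COMMIT (or ROLLBACK), and hook errors are deliberately ignored by the executor. A sketch of intended use, assuming ctx comes from a transaction this package began (register is presumably called at transaction start) and that os is imported:

// queue filesystem cleanup that must not happen unless the txn commits
db.AddPostCommitHook(ctx, func(ctx context.Context) error {
	return os.Remove(tempPath) // tempPath is illustrative
})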
diff --git a/pkg/sqlite/image.go b/pkg/sqlite/image.go
index d2b3adb8f..5d2eb22fd 100644
--- a/pkg/sqlite/image.go
+++ b/pkg/sqlite/image.go
@@ -1,199 +1,582 @@
 package sqlite
 
 import (
+	"context"
 	"database/sql"
-	"errors"
 	"fmt"
+	"path/filepath"
+	"time"
 
+	"github.com/jmoiron/sqlx"
+	"github.com/stashapp/stash/pkg/file"
 	"github.com/stashapp/stash/pkg/models"
+	"github.com/stashapp/stash/pkg/sliceutil/intslice"
+	"gopkg.in/guregu/null.v4"
+	"gopkg.in/guregu/null.v4/zero"
+
+	"github.com/doug-martin/goqu/v9"
+	"github.com/doug-martin/goqu/v9/exp"
 )
 
-const imageTable = "images"
-const imageIDColumn = "image_id"
-const performersImagesTable = "performers_images"
-const imagesTagsTable = "images_tags"
+var imageTable = "images"
 
-var imagesForGalleryQuery = selectAll(imageTable) + `
-INNER JOIN galleries_images as galleries_join on galleries_join.image_id = images.id
-WHERE galleries_join.gallery_id = ?
-GROUP BY images.id
-`
+const (
+	imageIDColumn         = "image_id"
+	performersImagesTable = "performers_images"
+	imagesTagsTable       = "images_tags"
+	imagesFilesTable      = "images_files"
+)
 
-var countImagesForGalleryQuery = `
-SELECT gallery_id FROM galleries_images
-WHERE gallery_id = ?
-GROUP BY image_id
-`
-
-type imageQueryBuilder struct {
-	repository
+type imageRow struct {
+	ID        int         `db:"id" goqu:"skipinsert"`
+	Title     zero.String `db:"title"`
+	Rating    null.Int    `db:"rating"`
+	Organized bool        `db:"organized"`
+	OCounter  int         `db:"o_counter"`
+	StudioID  null.Int    `db:"studio_id,omitempty"`
+	CreatedAt time.Time   `db:"created_at"`
+	UpdatedAt time.Time   `db:"updated_at"`
 }
 
-func NewImageReaderWriter(tx dbi) *imageQueryBuilder {
-	return &imageQueryBuilder{
-		repository{
-			tx:        tx,
+func (r *imageRow) fromImage(i models.Image) {
+	r.ID = i.ID
+	r.Title = zero.StringFrom(i.Title)
+	r.Rating = intFromPtr(i.Rating)
+	r.Organized = i.Organized
+	r.OCounter = i.OCounter
+	r.StudioID = intFromPtr(i.StudioID)
+	r.CreatedAt = i.CreatedAt
+	r.UpdatedAt = i.UpdatedAt
+}
+
+type imageQueryRow struct {
+	imageRow
+	PrimaryFileID         null.Int    `db:"primary_file_id"`
+	PrimaryFileFolderPath zero.String `db:"primary_file_folder_path"`
+	PrimaryFileBasename   zero.String `db:"primary_file_basename"`
+	PrimaryFileChecksum   zero.String `db:"primary_file_checksum"`
+}
+
+func (r *imageQueryRow) resolve() *models.Image {
+	ret := &models.Image{
+		ID:        r.ID,
+		Title:     r.Title.String,
+		Rating:    nullIntPtr(r.Rating),
+		Organized: r.Organized,
+		OCounter:  r.OCounter,
+		StudioID:  nullIntPtr(r.StudioID),
+
+		PrimaryFileID: nullIntFileIDPtr(r.PrimaryFileID),
+		Checksum:      r.PrimaryFileChecksum.String,
+
+		CreatedAt: r.CreatedAt,
+		UpdatedAt: r.UpdatedAt,
+	}
+
+	if r.PrimaryFileFolderPath.Valid && r.PrimaryFileBasename.Valid {
+		ret.Path = filepath.Join(r.PrimaryFileFolderPath.String, r.PrimaryFileBasename.String)
+	}
+
+	return ret
+}
+
+type imageRowRecord struct {
+	updateRecord
+}
+
+func (r *imageRowRecord) fromPartial(i models.ImagePartial) {
+	r.setNullString("title", i.Title)
+	r.setNullInt("rating", i.Rating)
+	r.setBool("organized", i.Organized)
+	r.setInt("o_counter", i.OCounter)
+	r.setNullInt("studio_id", i.StudioID)
+	r.setTime("created_at", i.CreatedAt)
+	r.setTime("updated_at", i.UpdatedAt)
+}
+
+type ImageStore struct {
+	repository
+
+	tableMgr *table
+	oCounterManager
+
+	fileStore *FileStore
+}
+
+func NewImageStore(fileStore *FileStore) *ImageStore {
+	return &ImageStore{
+		repository: repository{
 			tableName: imageTable,
 			idColumn:  idColumn,
 		},
+		tableMgr:        imageTableMgr,
+		oCounterManager: oCounterManager{imageTableMgr},
+		fileStore:       fileStore,
 	}
 }
 
-func (qb *imageQueryBuilder) Create(newObject models.Image) (*models.Image, error) {
-	var ret models.Image
-	if err := qb.insertObject(newObject, &ret); err != nil {
-		return nil, err
-	}
-
-	return &ret, nil
+func (qb *ImageStore) table() exp.IdentifierExpression {
+	return qb.tableMgr.table
 }
 
-func (qb *imageQueryBuilder) Update(updatedObject models.ImagePartial) (*models.Image, error) {
-	const partial = true
-	if err := qb.update(updatedObject.ID, updatedObject, partial); err != nil {
-		return nil, err
-	}
+func (qb *ImageStore) Create(ctx context.Context, newObject *models.ImageCreateInput) error {
+	var r imageRow
+	r.fromImage(*newObject.Image)
 
-	return qb.find(updatedObject.ID)
-}
-
-func (qb *imageQueryBuilder) UpdateFull(updatedObject models.Image) (*models.Image, error) {
-	const partial = false
-	if err := qb.update(updatedObject.ID, updatedObject, partial); err != nil {
-		return nil, err
-	}
-
-	return qb.find(updatedObject.ID)
-}
-
-func (qb *imageQueryBuilder) IncrementOCounter(id int) (int, error) {
-	_, err := qb.tx.Exec(
-		`UPDATE `+imageTable+` SET o_counter = o_counter + 1 WHERE `+imageTable+`.id = ?`,
-		id,
-	)
+	id, err := qb.tableMgr.insertID(ctx, r)
 	if err != nil {
-		return 0, err
+		return err
 	}
 
-	image, err := qb.find(id)
+	if len(newObject.FileIDs) > 0 {
+		const firstPrimary = true
+		if err := imagesFilesTableMgr.insertJoins(ctx, id, firstPrimary, newObject.FileIDs); err != nil {
+			return err
+		}
+	}
+
+	if newObject.PerformerIDs.Loaded() {
+		if err := imagesPerformersTableMgr.insertJoins(ctx, id, newObject.PerformerIDs.List()); err != nil {
+			return err
+		}
+	}
+	if newObject.TagIDs.Loaded() {
+		if err := imagesTagsTableMgr.insertJoins(ctx, id, newObject.TagIDs.List()); err != nil {
+			return err
+		}
+	}
+
+	if newObject.GalleryIDs.Loaded() {
+		if err := imageGalleriesTableMgr.insertJoins(ctx, id, newObject.GalleryIDs.List()); err != nil {
+			return err
+		}
+	}
+
+	updated, err := qb.Find(ctx, id)
 	if err != nil {
-		return 0, err
+		return fmt.Errorf("finding after create: %w", err)
 	}
 
-	return image.OCounter, nil
+	*newObject.Image = *updated
+
+	return nil
 }
 
-func (qb *imageQueryBuilder) DecrementOCounter(id int) (int, error) {
-	_, err := qb.tx.Exec(
-		`UPDATE `+imageTable+` SET o_counter = o_counter - 1 WHERE `+imageTable+`.id = ? and `+imageTable+`.o_counter > 0`,
-		id,
-	)
-	if err != nil {
-		return 0, err
+func (qb *ImageStore) UpdatePartial(ctx context.Context, id int, partial models.ImagePartial) (*models.Image, error) {
+	r := imageRowRecord{
+		updateRecord{
+			Record: make(exp.Record),
+		},
 	}
 
-	image, err := qb.find(id)
-	if err != nil {
-		return 0, err
-	}
+	r.fromPartial(partial)
 
-	return image.OCounter, nil
-}
-
-func (qb *imageQueryBuilder) ResetOCounter(id int) (int, error) {
-	_, err := qb.tx.Exec(
-		`UPDATE `+imageTable+` SET o_counter = 0 WHERE `+imageTable+`.id = ?`,
-		id,
-	)
-	if err != nil {
-		return 0, err
-	}
-
-	image, err := qb.find(id)
-	if err != nil {
-		return 0, err
-	}
-
-	return image.OCounter, nil
-}
-
-func (qb *imageQueryBuilder) Destroy(id int) error {
-	return qb.destroyExisting([]int{id})
-}
-
-func (qb *imageQueryBuilder) Find(id int) (*models.Image, error) {
-	return qb.find(id)
-}
-
-func (qb *imageQueryBuilder) FindMany(ids []int) ([]*models.Image, error) {
-	var images []*models.Image
-	for _, id := range ids {
-		image, err := qb.Find(id)
-		if err != nil {
+	if len(r.Record) > 0 {
+		if err := qb.tableMgr.updateByID(ctx, id, r.Record); err != nil {
 			return nil, err
 		}
+	}
 
-		if image == nil {
-			return nil, fmt.Errorf("image with id %d not found", id)
+	if partial.GalleryIDs != nil {
+		if err := imageGalleriesTableMgr.modifyJoins(ctx, id, partial.GalleryIDs.IDs, partial.GalleryIDs.Mode); err != nil {
+			return nil, err
+		}
+	}
+	if partial.PerformerIDs != nil {
+		if err := imagesPerformersTableMgr.modifyJoins(ctx, id, partial.PerformerIDs.IDs, partial.PerformerIDs.Mode); err != nil {
+			return nil, err
+		}
+	}
+	if partial.TagIDs != nil {
+		if err := imagesTagsTableMgr.modifyJoins(ctx, id, partial.TagIDs.IDs, partial.TagIDs.Mode); err != nil {
+			return nil, err
+		}
+	}
+
+	if partial.PrimaryFileID != nil {
+		if err := imagesFilesTableMgr.setPrimary(ctx, id, *partial.PrimaryFileID); err != nil {
+			return nil, err
+		}
+	}
+
+	return qb.find(ctx, id)
+}
+
+func (qb *ImageStore) Update(ctx context.Context, updatedObject *models.Image) error {
+	var r imageRow
+	r.fromImage(*updatedObject)
+
+	if err := qb.tableMgr.updateByID(ctx, updatedObject.ID, r); err != nil {
+		return err
+	}
+
+	if updatedObject.PerformerIDs.Loaded() {
+		if err := imagesPerformersTableMgr.replaceJoins(ctx, updatedObject.ID, updatedObject.PerformerIDs.List()); err != nil {
+			return err
+		}
+	}
+
+	if updatedObject.TagIDs.Loaded() {
+		if err := imagesTagsTableMgr.replaceJoins(ctx, updatedObject.ID, updatedObject.TagIDs.List()); err != nil {
+			return err
+		}
+	}
+
+	if updatedObject.GalleryIDs.Loaded() {
+		if err := imageGalleriesTableMgr.replaceJoins(ctx, updatedObject.ID, updatedObject.GalleryIDs.List()); err != nil {
+			return err
+		}
+	}
+
+	if updatedObject.Files.Loaded() {
+		fileIDs := make([]file.ID, len(updatedObject.Files.List()))
+		for i, f := range updatedObject.Files.List() {
+			fileIDs[i] = f.ID
 		}
 
+		if err := imagesFilesTableMgr.replaceJoins(ctx, updatedObject.ID, fileIDs); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (qb *ImageStore) Destroy(ctx context.Context, id int) error {
+	return qb.tableMgr.destroyExisting(ctx, []int{id})
+}
+
+func (qb *ImageStore) Find(ctx context.Context, id int) (*models.Image, error) {
+	return qb.find(ctx, id)
+}
+
+func (qb *ImageStore) FindMany(ctx context.Context, ids []int) ([]*models.Image, error) {
+	q := qb.selectDataset().Prepared(true).Where(qb.table().Col(idColumn).In(ids))
+	unsorted, err := qb.getMany(ctx, q)
+	if err != nil {
+		return nil, err
+	}
+
+	images := make([]*models.Image, len(ids))
+
+	for _, s := range unsorted {
+		i := intslice.IntIndex(ids, s.ID)
+		images[i] = s
+	}
+
+	for i := range images {
+		if images[i] == nil {
+			return nil, fmt.Errorf("image with id %d not found", ids[i])
 		}
 	}
 
 	return images, nil
 }
 
-func (qb *imageQueryBuilder) find(id int) (*models.Image, error) {
-	var ret models.Image
-	if err := qb.get(id, &ret); err != nil {
-		if errors.Is(err, sql.ErrNoRows) {
-			return nil, nil
-		}
+func (qb *ImageStore) selectDataset() *goqu.SelectDataset {
+	table := qb.table()
+	files := fileTableMgr.table
+	folders := folderTableMgr.table
+	checksum := fingerprintTableMgr.table
+
+	return dialect.From(table).LeftJoin(
+		imagesFilesJoinTable,
+		goqu.On(
+			imagesFilesJoinTable.Col(imageIDColumn).Eq(table.Col(idColumn)),
+			imagesFilesJoinTable.Col("primary").Eq(1),
+		),
+	).LeftJoin(
+		files,
+		goqu.On(files.Col(idColumn).Eq(imagesFilesJoinTable.Col(fileIDColumn))),
+	).LeftJoin(
+		folders,
+		goqu.On(folders.Col(idColumn).Eq(files.Col("parent_folder_id"))),
+	).LeftJoin(
+		checksum,
+		goqu.On(
+			checksum.Col(fileIDColumn).Eq(imagesFilesJoinTable.Col(fileIDColumn)),
+			checksum.Col("type").Eq(file.FingerprintTypeMD5),
+		),
+	).Select(
+		qb.table().All(),
+		imagesFilesJoinTable.Col(fileIDColumn).As("primary_file_id"),
+		folders.Col("path").As("primary_file_folder_path"),
+		files.Col("basename").As("primary_file_basename"),
+		checksum.Col("fingerprint").As("primary_file_checksum"),
+	)
+}
+
+func (qb *ImageStore) get(ctx context.Context, q *goqu.SelectDataset) (*models.Image, error) {
+	ret, err := qb.getMany(ctx, q)
+	if err != nil {
 		return nil, err
 	}
-	return &ret, nil
+
+	if len(ret) == 0 {
+		return nil, sql.ErrNoRows
+	}
+
+	return ret[0], nil
 }
 
-func (qb *imageQueryBuilder) FindByChecksum(checksum string) (*models.Image, error) {
-	query := "SELECT * FROM images WHERE checksum = ? LIMIT 1"
-	args := []interface{}{checksum}
-	return qb.queryImage(query, args)
+func (qb *ImageStore) getMany(ctx context.Context, q *goqu.SelectDataset) ([]*models.Image, error) {
+	const single = false
+	var ret []*models.Image
+	var lastID int
+	if err := queryFunc(ctx, q, single, func(r *sqlx.Rows) error {
+		var f imageQueryRow
+		if err := r.StructScan(&f); err != nil {
+			return err
+		}
+
+		i := f.resolve()
+
+		if i.ID == lastID {
+			return fmt.Errorf("internal error: multiple rows returned for single image id %d", i.ID)
+		}
+		lastID = i.ID
+
+		ret = append(ret, i)
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	return ret, nil
 }
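selectDataset and getMany above form the read path: every query selects the image row plus its primary file's folder, basename and MD5 fingerprint in one pass. Lookups by related rows are then composed as id-subqueries, roughly like this sketch (column and table names as used in this file; the file id is hypothetical):

// sketch: find images attached to file id 3 by composing a subquery
// of image ids and handing it to the shared select/scan path
sq := dialect.From(goqu.T("images")).
	InnerJoin(
		goqu.T("images_files"),
		goqu.On(goqu.T("images").Col("id").Eq(goqu.T("images_files").Col("image_id"))),
	).
	Select(goqu.T("images").Col("id")).
	Where(goqu.T("images_files").Col("file_id").Eq(3))
images, err := qb.findBySubquery(ctx, sq)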
+ if err != nil { + return nil, err + } + + ret := make([]*file.ImageFile, len(files)) + for i, f := range files { + var ok bool + ret[i], ok = f.(*file.ImageFile) + if !ok { + return nil, fmt.Errorf("expected file to be *file.ImageFile not %T", f) + } + } + + return ret, nil } -func (qb *imageQueryBuilder) FindByGalleryID(galleryID int) ([]*models.Image, error) { - args := []interface{}{galleryID} - sort := "path" - sortDir := models.SortDirectionEnumAsc - return qb.queryImages(imagesForGalleryQuery+qb.getImageSort(&models.FindFilterType{ - Sort: &sort, - Direction: &sortDir, - }), args) +func (qb *ImageStore) GetManyFileIDs(ctx context.Context, ids []int) ([][]file.ID, error) { + const primaryOnly = false + return qb.filesRepository().getMany(ctx, ids, primaryOnly) } -func (qb *imageQueryBuilder) CountByGalleryID(galleryID int) (int, error) { - args := []interface{}{galleryID} - return qb.runCountQuery(qb.buildCountQuery(countImagesForGalleryQuery), args) +func (qb *ImageStore) find(ctx context.Context, id int) (*models.Image, error) { + q := qb.selectDataset().Where(qb.tableMgr.byID(id)) + + ret, err := qb.get(ctx, q) + if err != nil { + return nil, fmt.Errorf("getting image by id %d: %w", id, err) + } + + return ret, nil } -func (qb *imageQueryBuilder) Count() (int, error) { - return qb.runCountQuery(qb.buildCountQuery("SELECT images.id FROM images"), nil) +func (qb *ImageStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]*models.Image, error) { + table := qb.table() + + q := qb.selectDataset().Prepared(true).Where( + table.Col(idColumn).Eq( + sq, + ), + ) + + return qb.getMany(ctx, q) } -func (qb *imageQueryBuilder) Size() (float64, error) { - return qb.runSumQuery("SELECT SUM(cast(size as double)) as sum FROM images", nil) +func (qb *ImageStore) FindByFileID(ctx context.Context, fileID file.ID) ([]*models.Image, error) { + table := qb.table() + + sq := dialect.From(table). + InnerJoin( + imagesFilesJoinTable, + goqu.On(table.Col(idColumn).Eq(imagesFilesJoinTable.Col(imageIDColumn))), + ). + Select(table.Col(idColumn)).Where(imagesFilesJoinTable.Col(fileIDColumn).Eq(fileID)) + + ret, err := qb.findBySubquery(ctx, sq) + if err != nil { + return nil, fmt.Errorf("getting image by file id %d: %w", fileID, err) + } + + return ret, nil } -func (qb *imageQueryBuilder) All() ([]*models.Image, error) { - return qb.queryImages(selectAll(imageTable)+qb.getImageSort(nil), nil) +func (qb *ImageStore) CountByFileID(ctx context.Context, fileID file.ID) (int, error) { + joinTable := imagesFilesJoinTable + + q := dialect.Select(goqu.COUNT("*")).From(joinTable).Where(joinTable.Col(fileIDColumn).Eq(fileID)) + return count(ctx, q) } -func (qb *imageQueryBuilder) validateFilter(imageFilter *models.ImageFilterType) error { +func (qb *ImageStore) FindByFingerprints(ctx context.Context, fp []file.Fingerprint) ([]*models.Image, error) { + table := qb.table() + fingerprintTable := fingerprintTableMgr.table + + var ex []exp.Expression + + for _, v := range fp { + ex = append(ex, goqu.And( + fingerprintTable.Col("type").Eq(v.Type), + fingerprintTable.Col("fingerprint").Eq(v.Fingerprint), + )) + } + + sq := dialect.From(table). + InnerJoin( + imagesFilesJoinTable, + goqu.On(table.Col(idColumn).Eq(imagesFilesJoinTable.Col(imageIDColumn))), + ). + InnerJoin( + fingerprintTable, + goqu.On(fingerprintTable.Col(fileIDColumn).Eq(imagesFilesJoinTable.Col(fileIDColumn))), + ). 
+ Select(table.Col(idColumn)).Where(goqu.Or(ex...)) + + ret, err := qb.findBySubquery(ctx, sq) + if err != nil { + return nil, fmt.Errorf("getting image by fingerprints: %w", err) + } + + return ret, nil +} + +func (qb *ImageStore) FindByChecksum(ctx context.Context, checksum string) ([]*models.Image, error) { + return qb.FindByFingerprints(ctx, []file.Fingerprint{ + { + Type: file.FingerprintTypeMD5, + Fingerprint: checksum, + }, + }) +} + +func (qb *ImageStore) FindByGalleryID(ctx context.Context, galleryID int) ([]*models.Image, error) { + table := qb.table() + fileTable := fileTableMgr.table + folderTable := folderTableMgr.table + + sq := dialect.From(table). + InnerJoin( + galleriesImagesJoinTable, + goqu.On(table.Col(idColumn).Eq(galleriesImagesJoinTable.Col(imageIDColumn))), + ). + Select(table.Col(idColumn)).Where( + galleriesImagesJoinTable.Col("gallery_id").Eq(galleryID), + ) + + q := qb.selectDataset().Prepared(true).Where( + table.Col(idColumn).Eq( + sq, + ), + ).Order(folderTable.Col("path").Asc(), fileTable.Col("basename").Asc()) + + ret, err := qb.getMany(ctx, q) + if err != nil { + return nil, fmt.Errorf("getting images for gallery %d: %w", galleryID, err) + } + + return ret, nil +} + +func (qb *ImageStore) CountByGalleryID(ctx context.Context, galleryID int) (int, error) { + joinTable := goqu.T(galleriesImagesTable) + + q := dialect.Select(goqu.COUNT("*")).From(joinTable).Where(joinTable.Col("gallery_id").Eq(galleryID)) + return count(ctx, q) +} + +func (qb *ImageStore) FindByFolderID(ctx context.Context, folderID file.FolderID) ([]*models.Image, error) { + table := qb.table() + fileTable := goqu.T(fileTable) + + sq := dialect.From(table). + InnerJoin( + imagesFilesJoinTable, + goqu.On(table.Col(idColumn).Eq(imagesFilesJoinTable.Col(imageIDColumn))), + ). + InnerJoin( + fileTable, + goqu.On(imagesFilesJoinTable.Col(fileIDColumn).Eq(fileTable.Col(idColumn))), + ). + Select(table.Col(idColumn)).Where( + fileTable.Col("parent_folder_id").Eq(folderID), + ) + + ret, err := qb.findBySubquery(ctx, sq) + if err != nil { + return nil, fmt.Errorf("getting image by folder: %w", err) + } + + return ret, nil +} + +func (qb *ImageStore) FindByZipFileID(ctx context.Context, zipFileID file.ID) ([]*models.Image, error) { + table := qb.table() + fileTable := goqu.T(fileTable) + + sq := dialect.From(table). + InnerJoin( + imagesFilesJoinTable, + goqu.On(table.Col(idColumn).Eq(imagesFilesJoinTable.Col(imageIDColumn))), + ). + InnerJoin( + fileTable, + goqu.On(imagesFilesJoinTable.Col(fileIDColumn).Eq(fileTable.Col(idColumn))), + ). 
+ Select(table.Col(idColumn)).Where( + fileTable.Col("zip_file_id").Eq(zipFileID), + ) + + ret, err := qb.findBySubquery(ctx, sq) + if err != nil { + return nil, fmt.Errorf("getting image by zip file: %w", err) + } + + return ret, nil +} + +func (qb *ImageStore) Count(ctx context.Context) (int, error) { + q := dialect.Select(goqu.COUNT("*")).From(qb.table()) + return count(ctx, q) +} + +func (qb *ImageStore) Size(ctx context.Context) (float64, error) { + table := qb.table() + fileTable := fileTableMgr.table + q := dialect.Select( + goqu.SUM(fileTableMgr.table.Col("size")), + ).From(table).InnerJoin( + imagesFilesJoinTable, + goqu.On(table.Col(idColumn).Eq(imagesFilesJoinTable.Col(imageIDColumn))), + ).InnerJoin( + fileTable, + goqu.On(imagesFilesJoinTable.Col(fileIDColumn).Eq(fileTable.Col(idColumn))), + ) + var ret float64 + if err := querySimple(ctx, q, &ret); err != nil { + return 0, err + } + + return ret, nil +} + +func (qb *ImageStore) All(ctx context.Context) ([]*models.Image, error) { + return qb.getMany(ctx, qb.selectDataset()) +} + +func (qb *ImageStore) validateFilter(imageFilter *models.ImageFilterType) error { const and = "AND" const or = "OR" const not = "NOT" @@ -224,41 +607,70 @@ func (qb *imageQueryBuilder) validateFilter(imageFilter *models.ImageFilterType) return nil } -func (qb *imageQueryBuilder) makeFilter(imageFilter *models.ImageFilterType) *filterBuilder { +func (qb *ImageStore) makeFilter(ctx context.Context, imageFilter *models.ImageFilterType) *filterBuilder { query := &filterBuilder{} if imageFilter.And != nil { - query.and(qb.makeFilter(imageFilter.And)) + query.and(qb.makeFilter(ctx, imageFilter.And)) } if imageFilter.Or != nil { - query.or(qb.makeFilter(imageFilter.Or)) + query.or(qb.makeFilter(ctx, imageFilter.Or)) } if imageFilter.Not != nil { - query.not(qb.makeFilter(imageFilter.Not)) + query.not(qb.makeFilter(ctx, imageFilter.Not)) } - query.handleCriterion(stringCriterionHandler(imageFilter.Checksum, "images.checksum")) - query.handleCriterion(stringCriterionHandler(imageFilter.Title, "images.title")) - query.handleCriterion(stringCriterionHandler(imageFilter.Path, "images.path")) - query.handleCriterion(intCriterionHandler(imageFilter.Rating, "images.rating")) - query.handleCriterion(intCriterionHandler(imageFilter.OCounter, "images.o_counter")) - query.handleCriterion(boolCriterionHandler(imageFilter.Organized, "images.organized")) - query.handleCriterion(resolutionCriterionHandler(imageFilter.Resolution, "images.height", "images.width")) - query.handleCriterion(imageIsMissingCriterionHandler(qb, imageFilter.IsMissing)) + query.handleCriterion(ctx, criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) { + if imageFilter.Checksum != nil { + qb.addImagesFilesTable(f) + f.addInnerJoin(fingerprintTable, "fingerprints_md5", "images_files.file_id = fingerprints_md5.file_id AND fingerprints_md5.type = 'md5'") + } - query.handleCriterion(imageTagsCriterionHandler(qb, imageFilter.Tags)) - query.handleCriterion(imageTagCountCriterionHandler(qb, imageFilter.TagCount)) - query.handleCriterion(imageGalleriesCriterionHandler(qb, imageFilter.Galleries)) - query.handleCriterion(imagePerformersCriterionHandler(qb, imageFilter.Performers)) - query.handleCriterion(imagePerformerCountCriterionHandler(qb, imageFilter.PerformerCount)) - query.handleCriterion(imageStudioCriterionHandler(qb, imageFilter.Studios)) - query.handleCriterion(imagePerformerTagsCriterionHandler(qb, imageFilter.PerformerTags)) - 
query.handleCriterion(imagePerformerFavoriteCriterionHandler(imageFilter.PerformerFavorite)) + stringCriterionHandler(imageFilter.Checksum, "fingerprints_md5.fingerprint")(ctx, f) + })) + query.handleCriterion(ctx, stringCriterionHandler(imageFilter.Title, "images.title")) + + query.handleCriterion(ctx, pathCriterionHandler(imageFilter.Path, "folders.path", "files.basename", qb.addFoldersTable)) + query.handleCriterion(ctx, imageFileCountCriterionHandler(qb, imageFilter.FileCount)) + query.handleCriterion(ctx, intCriterionHandler(imageFilter.Rating, "images.rating", nil)) + query.handleCriterion(ctx, intCriterionHandler(imageFilter.OCounter, "images.o_counter", nil)) + query.handleCriterion(ctx, boolCriterionHandler(imageFilter.Organized, "images.organized", nil)) + + query.handleCriterion(ctx, resolutionCriterionHandler(imageFilter.Resolution, "image_files.height", "image_files.width", qb.addImageFilesTable)) + query.handleCriterion(ctx, imageIsMissingCriterionHandler(qb, imageFilter.IsMissing)) + + query.handleCriterion(ctx, imageTagsCriterionHandler(qb, imageFilter.Tags)) + query.handleCriterion(ctx, imageTagCountCriterionHandler(qb, imageFilter.TagCount)) + query.handleCriterion(ctx, imageGalleriesCriterionHandler(qb, imageFilter.Galleries)) + query.handleCriterion(ctx, imagePerformersCriterionHandler(qb, imageFilter.Performers)) + query.handleCriterion(ctx, imagePerformerCountCriterionHandler(qb, imageFilter.PerformerCount)) + query.handleCriterion(ctx, imageStudioCriterionHandler(qb, imageFilter.Studios)) + query.handleCriterion(ctx, imagePerformerTagsCriterionHandler(qb, imageFilter.PerformerTags)) + query.handleCriterion(ctx, imagePerformerFavoriteCriterionHandler(imageFilter.PerformerFavorite)) return query } -func (qb *imageQueryBuilder) makeQuery(imageFilter *models.ImageFilterType, findFilter *models.FindFilterType) (*queryBuilder, error) { +func (qb *ImageStore) addImagesFilesTable(f *filterBuilder) { + f.addLeftJoin(imagesFilesTable, "", "images_files.image_id = images.id") +} + +func (qb *ImageStore) addFilesTable(f *filterBuilder) { + qb.addImagesFilesTable(f) + f.addLeftJoin(fileTable, "", "images_files.file_id = files.id") +} + +func (qb *ImageStore) addFoldersTable(f *filterBuilder) { + qb.addFilesTable(f) + f.addLeftJoin(folderTable, "", "files.parent_folder_id = folders.id") +} + +func (qb *ImageStore) addImageFilesTable(f *filterBuilder) { + qb.addImagesFilesTable(f) + f.addLeftJoin(imageFileTable, "", "image_files.file_id = images_files.file_id") +} + +func (qb *ImageStore) makeQuery(ctx context.Context, imageFilter *models.ImageFilterType, findFilter *models.FindFilterType) (*queryBuilder, error) { if imageFilter == nil { imageFilter = &models.ImageFilterType{} } @@ -270,34 +682,53 @@ func (qb *imageQueryBuilder) makeQuery(imageFilter *models.ImageFilterType, find distinctIDs(&query, imageTable) if q := findFilter.Q; q != nil && *q != "" { - searchColumns := []string{"images.title", "images.path", "images.checksum"} + query.addJoins( + join{ + table: imagesFilesTable, + onClause: "images_files.image_id = images.id", + }, + join{ + table: fileTable, + onClause: "images_files.file_id = files.id", + }, + join{ + table: folderTable, + onClause: "files.parent_folder_id = folders.id", + }, + join{ + table: fingerprintTable, + onClause: "files_fingerprints.file_id = images_files.file_id", + }, + ) + + searchColumns := []string{"images.title", "folders.path", "files.basename", "files_fingerprints.fingerprint"} query.parseQueryString(searchColumns, *q) } if err := 
qb.validateFilter(imageFilter); err != nil { return nil, err } - filter := qb.makeFilter(imageFilter) + filter := qb.makeFilter(ctx, imageFilter) query.addFilter(filter) - query.sortAndPagination = qb.getImageSort(findFilter) + getPagination(findFilter) + qb.setImageSortAndPagination(&query, findFilter) return &query, nil } -func (qb *imageQueryBuilder) Query(options models.ImageQueryOptions) (*models.ImageQueryResult, error) { - query, err := qb.makeQuery(options.ImageFilter, options.FindFilter) +func (qb *ImageStore) Query(ctx context.Context, options models.ImageQueryOptions) (*models.ImageQueryResult, error) { + query, err := qb.makeQuery(ctx, options.ImageFilter, options.FindFilter) if err != nil { return nil, err } - result, err := qb.queryGroupedFields(options, *query) + result, err := qb.queryGroupedFields(ctx, options, *query) if err != nil { return nil, fmt.Errorf("error querying aggregate fields: %w", err) } - idsResult, err := query.findIDs() + idsResult, err := query.findIDs(ctx) if err != nil { return nil, fmt.Errorf("error finding IDs: %w", err) } @@ -306,7 +737,7 @@ func (qb *imageQueryBuilder) Query(options models.ImageQueryOptions) (*models.Im return result, nil } -func (qb *imageQueryBuilder) queryGroupedFields(options models.ImageQueryOptions, query queryBuilder) (*models.ImageQueryResult, error) { +func (qb *ImageStore) queryGroupedFields(ctx context.Context, options models.ImageQueryOptions, query queryBuilder) (*models.ImageQueryResult, error) { if !options.Count && !options.Megapixels && !options.TotalSize { // nothing to do - return empty result return models.NewImageQueryResult(qb), nil @@ -318,15 +749,16 @@ func (qb *imageQueryBuilder) queryGroupedFields(options models.ImageQueryOptions aggregateQuery.addColumn("COUNT(temp.id) as total") } - if options.Megapixels { - query.addColumn("COALESCE(images.width, 0) * COALESCE(images.height, 0) / 1000000 as megapixels") - aggregateQuery.addColumn("COALESCE(SUM(temp.megapixels), 0) as megapixels") - } + // TODO - this doesn't work yet + // if options.Megapixels { + // query.addColumn("COALESCE(images.width, 0) * COALESCE(images.height, 0) / 1000000 as megapixels") + // aggregateQuery.addColumn("COALESCE(SUM(temp.megapixels), 0) as megapixels") + // } - if options.TotalSize { - query.addColumn("COALESCE(images.size, 0) as size") - aggregateQuery.addColumn("COALESCE(SUM(temp.size), 0) as size") - } + // if options.TotalSize { + // query.addColumn("COALESCE(images.size, 0) as size") + // aggregateQuery.addColumn("COALESCE(SUM(temp.size), 0) as size") + // } const includeSortPagination = false aggregateQuery.from = fmt.Sprintf("(%s) as temp", query.toSQL(includeSortPagination)) @@ -336,7 +768,7 @@ func (qb *imageQueryBuilder) queryGroupedFields(options models.ImageQueryOptions Megapixels float64 Size float64 }{} - if err := qb.repository.queryStruct(aggregateQuery.toSQL(includeSortPagination), query.args, &out); err != nil { + if err := qb.repository.queryStruct(ctx, aggregateQuery.toSQL(includeSortPagination), query.args, &out); err != nil { return nil, err } @@ -347,17 +779,27 @@ func (qb *imageQueryBuilder) queryGroupedFields(options models.ImageQueryOptions return ret, nil } -func (qb *imageQueryBuilder) QueryCount(imageFilter *models.ImageFilterType, findFilter *models.FindFilterType) (int, error) { - query, err := qb.makeQuery(imageFilter, findFilter) +func (qb *ImageStore) QueryCount(ctx context.Context, imageFilter *models.ImageFilterType, findFilter *models.FindFilterType) (int, error) { + query, err := 
qb.makeQuery(ctx, imageFilter, findFilter) if err != nil { return 0, err } - return query.executeCount() + return query.executeCount(ctx) } -func imageIsMissingCriterionHandler(qb *imageQueryBuilder, isMissing *string) criterionHandlerFunc { - return func(f *filterBuilder) { +func imageFileCountCriterionHandler(qb *ImageStore, fileCount *models.IntCriterionInput) criterionHandlerFunc { + h := countCriterionHandlerBuilder{ + primaryTable: imageTable, + joinTable: imagesFilesTable, + primaryFK: imageIDColumn, + } + + return h.handler(fileCount) +} + +func imageIsMissingCriterionHandler(qb *ImageStore, isMissing *string) criterionHandlerFunc { + return func(ctx context.Context, f *filterBuilder) { if isMissing != nil && *isMissing != "" { switch *isMissing { case "studio": @@ -378,7 +820,7 @@ func imageIsMissingCriterionHandler(qb *imageQueryBuilder, isMissing *string) cr } } -func (qb *imageQueryBuilder) getMultiCriterionHandlerBuilder(foreignTable, joinTable, foreignFK string, addJoinsFunc func(f *filterBuilder)) multiCriterionHandlerBuilder { +func (qb *ImageStore) getMultiCriterionHandlerBuilder(foreignTable, joinTable, foreignFK string, addJoinsFunc func(f *filterBuilder)) multiCriterionHandlerBuilder { return multiCriterionHandlerBuilder{ primaryTable: imageTable, foreignTable: foreignTable, @@ -389,7 +831,7 @@ func (qb *imageQueryBuilder) getMultiCriterionHandlerBuilder(foreignTable, joinT } } -func imageTagsCriterionHandler(qb *imageQueryBuilder, tags *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { +func imageTagsCriterionHandler(qb *ImageStore, tags *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { h := joinedHierarchicalMultiCriterionHandlerBuilder{ tx: qb.tx, @@ -406,7 +848,7 @@ func imageTagsCriterionHandler(qb *imageQueryBuilder, tags *models.HierarchicalM return h.handler(tags) } -func imageTagCountCriterionHandler(qb *imageQueryBuilder, tagCount *models.IntCriterionInput) criterionHandlerFunc { +func imageTagCountCriterionHandler(qb *ImageStore, tagCount *models.IntCriterionInput) criterionHandlerFunc { h := countCriterionHandlerBuilder{ primaryTable: imageTable, joinTable: imagesTagsTable, @@ -416,17 +858,19 @@ func imageTagCountCriterionHandler(qb *imageQueryBuilder, tagCount *models.IntCr return h.handler(tagCount) } -func imageGalleriesCriterionHandler(qb *imageQueryBuilder, galleries *models.MultiCriterionInput) criterionHandlerFunc { +func imageGalleriesCriterionHandler(qb *ImageStore, galleries *models.MultiCriterionInput) criterionHandlerFunc { addJoinsFunc := func(f *filterBuilder) { - qb.galleriesRepository().join(f, "", "images.id") - f.addLeftJoin(galleryTable, "", "galleries_images.gallery_id = galleries.id") + if galleries.Modifier == models.CriterionModifierIncludes || galleries.Modifier == models.CriterionModifierIncludesAll { + f.addInnerJoin(galleriesImagesTable, "", "galleries_images.image_id = images.id") + f.addInnerJoin(galleryTable, "", "galleries_images.gallery_id = galleries.id") + } } h := qb.getMultiCriterionHandlerBuilder(galleryTable, galleriesImagesTable, galleryIDColumn, addJoinsFunc) return h.handler(galleries) } -func imagePerformersCriterionHandler(qb *imageQueryBuilder, performers *models.MultiCriterionInput) criterionHandlerFunc { +func imagePerformersCriterionHandler(qb *ImageStore, performers *models.MultiCriterionInput) criterionHandlerFunc { h := joinedMultiCriterionHandlerBuilder{ primaryTable: imageTable, joinTable: performersImagesTable, @@ -442,7 +886,7 @@ func imagePerformersCriterionHandler(qb 
*imageQueryBuilder, performers *models.M return h.handler(performers) } -func imagePerformerCountCriterionHandler(qb *imageQueryBuilder, performerCount *models.IntCriterionInput) criterionHandlerFunc { +func imagePerformerCountCriterionHandler(qb *ImageStore, performerCount *models.IntCriterionInput) criterionHandlerFunc { h := countCriterionHandlerBuilder{ primaryTable: imageTable, joinTable: performersImagesTable, @@ -453,7 +897,7 @@ func imagePerformerCountCriterionHandler(qb *imageQueryBuilder, performerCount * } func imagePerformerFavoriteCriterionHandler(performerfavorite *bool) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if performerfavorite != nil { f.addLeftJoin("performers_images", "", "images.id = performers_images.image_id") @@ -472,7 +916,7 @@ GROUP BY performers_images.image_id HAVING SUM(performers.favorite) = 0)`, "nofa } } -func imageStudioCriterionHandler(qb *imageQueryBuilder, studios *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { +func imageStudioCriterionHandler(qb *ImageStore, studios *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { h := hierarchicalMultiCriterionHandlerBuilder{ tx: qb.tx, @@ -486,8 +930,8 @@ func imageStudioCriterionHandler(qb *imageQueryBuilder, studios *models.Hierarch return h.handler(studios) } -func imagePerformerTagsCriterionHandler(qb *imageQueryBuilder, tags *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { +func imagePerformerTagsCriterionHandler(qb *ImageStore, tags *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { + return func(ctx context.Context, f *filterBuilder) { if tags != nil { if tags.Modifier == models.CriterionModifierIsNull || tags.Modifier == models.CriterionModifierNotNull { var notClause string @@ -506,7 +950,7 @@ func imagePerformerTagsCriterionHandler(qb *imageQueryBuilder, tags *models.Hier return } - valuesClause := getHierarchicalValues(qb.tx, tags.Value, tagTable, "tags_relations", "", tags.Depth) + valuesClause := getHierarchicalValues(ctx, qb.tx, tags.Value, tagTable, "tags_relations", "", tags.Depth) f.addWith(`performer_tags AS ( SELECT pi.image_id, t.column1 AS root_tag_id FROM performers_images pi @@ -521,41 +965,57 @@ INNER JOIN (` + valuesClause + `) t ON t.column2 = pt.tag_id } } -func (qb *imageQueryBuilder) getImageSort(findFilter *models.FindFilterType) string { - if findFilter == nil || findFilter.Sort == nil || *findFilter.Sort == "" { - return "" - } - sort := findFilter.GetSort("title") - direction := findFilter.GetDirection() +func (qb *ImageStore) setImageSortAndPagination(q *queryBuilder, findFilter *models.FindFilterType) { + sortClause := "" - switch sort { - case "tag_count": - return getCountSort(imageTable, imagesTagsTable, imageIDColumn, direction) - case "performer_count": - return getCountSort(imageTable, performersImagesTable, imageIDColumn, direction) - default: - return getSort(sort, direction, "images") + if findFilter != nil && findFilter.Sort != nil && *findFilter.Sort != "" { + sort := findFilter.GetSort("title") + direction := findFilter.GetDirection() + + // translate sort field + if sort == "file_mod_time" { + sort = "mod_time" + } + + addFilesJoin := func() { + q.addJoins( + join{ + table: imagesFilesTable, + onClause: "images_files.image_id = images.id", + }, + join{ + table: fileTable, + onClause: "images_files.file_id = files.id", + }, + ) + } + + switch sort { + case "path": + addFilesJoin() + q.addJoins(join{ + table: 
folderTable, + onClause: "files.parent_folder_id = folders.id", + }) + sortClause = " ORDER BY folders.path " + direction + ", files.basename " + direction + case "file_count": + sortClause = getCountSort(imageTable, imagesFilesTable, imageIDColumn, direction) + case "tag_count": + sortClause = getCountSort(imageTable, imagesTagsTable, imageIDColumn, direction) + case "performer_count": + sortClause = getCountSort(imageTable, performersImagesTable, imageIDColumn, direction) + case "mod_time", "filesize": + addFilesJoin() + sortClause = getSort(sort, direction, "files") + default: + sortClause = getSort(sort, direction, "images") + } } + + q.sortAndPagination = sortClause + getPagination(findFilter) } -func (qb *imageQueryBuilder) queryImage(query string, args []interface{}) (*models.Image, error) { - results, err := qb.queryImages(query, args) - if err != nil || len(results) < 1 { - return nil, err - } - return results[0], nil -} - -func (qb *imageQueryBuilder) queryImages(query string, args []interface{}) ([]*models.Image, error) { - var ret models.Images - if err := qb.query(query, args, &ret); err != nil { - return nil, err - } - - return []*models.Image(ret), nil -} - -func (qb *imageQueryBuilder) galleriesRepository() *joinRepository { +func (qb *ImageStore) galleriesRepository() *joinRepository { return &joinRepository{ repository: repository{ tx: qb.tx, @@ -566,16 +1026,31 @@ func (qb *imageQueryBuilder) galleriesRepository() *joinRepository { } } -func (qb *imageQueryBuilder) GetGalleryIDs(imageID int) ([]int, error) { - return qb.galleriesRepository().getIDs(imageID) +func (qb *ImageStore) filesRepository() *filesRepository { + return &filesRepository{ + repository: repository{ + tx: qb.tx, + tableName: imagesFilesTable, + idColumn: imageIDColumn, + }, + } } -func (qb *imageQueryBuilder) UpdateGalleries(imageID int, galleryIDs []int) error { - // Delete the existing joins and then create new ones - return qb.galleriesRepository().replace(imageID, galleryIDs) +func (qb *ImageStore) AddFileID(ctx context.Context, id int, fileID file.ID) error { + const firstPrimary = false + return imagesFilesTableMgr.insertJoins(ctx, id, firstPrimary, []file.ID{fileID}) } -func (qb *imageQueryBuilder) performersRepository() *joinRepository { +func (qb *ImageStore) GetGalleryIDs(ctx context.Context, imageID int) ([]int, error) { + return qb.galleriesRepository().getIDs(ctx, imageID) +} + +// func (qb *imageQueryBuilder) UpdateGalleries(ctx context.Context, imageID int, galleryIDs []int) error { +// // Delete the existing joins and then create new ones +// return qb.galleriesRepository().replace(ctx, imageID, galleryIDs) +// } + +func (qb *ImageStore) performersRepository() *joinRepository { return &joinRepository{ repository: repository{ tx: qb.tx, @@ -586,16 +1061,16 @@ func (qb *imageQueryBuilder) performersRepository() *joinRepository { } } -func (qb *imageQueryBuilder) GetPerformerIDs(imageID int) ([]int, error) { - return qb.performersRepository().getIDs(imageID) +func (qb *ImageStore) GetPerformerIDs(ctx context.Context, imageID int) ([]int, error) { + return qb.performersRepository().getIDs(ctx, imageID) } -func (qb *imageQueryBuilder) UpdatePerformers(imageID int, performerIDs []int) error { +func (qb *ImageStore) UpdatePerformers(ctx context.Context, imageID int, performerIDs []int) error { // Delete the existing joins and then create new ones - return qb.performersRepository().replace(imageID, performerIDs) + return qb.performersRepository().replace(ctx, imageID, performerIDs) } -func (qb 
*imageQueryBuilder) tagsRepository() *joinRepository { +func (qb *ImageStore) tagsRepository() *joinRepository { return &joinRepository{ repository: repository{ tx: qb.tx, @@ -606,11 +1081,11 @@ func (qb *imageQueryBuilder) tagsRepository() *joinRepository { } } -func (qb *imageQueryBuilder) GetTagIDs(imageID int) ([]int, error) { - return qb.tagsRepository().getIDs(imageID) +func (qb *ImageStore) GetTagIDs(ctx context.Context, imageID int) ([]int, error) { + return qb.tagsRepository().getIDs(ctx, imageID) } -func (qb *imageQueryBuilder) UpdateTags(imageID int, tagIDs []int) error { +func (qb *ImageStore) UpdateTags(ctx context.Context, imageID int, tagIDs []int) error { // Delete the existing joins and then create new ones - return qb.tagsRepository().replace(imageID, tagIDs) + return qb.tagsRepository().replace(ctx, imageID, tagIDs) } diff --git a/pkg/sqlite/image_test.go b/pkg/sqlite/image_test.go index 552db2cdf..a7cc7f61d 100644 --- a/pkg/sqlite/image_test.go +++ b/pkg/sqlite/image_test.go @@ -4,113 +4,1464 @@ package sqlite_test import ( - "database/sql" + "context" + "reflect" "strconv" "testing" + "time" - "github.com/stretchr/testify/assert" - + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/models" + "github.com/stretchr/testify/assert" ) -func TestImageFind(t *testing.T) { - withTxn(func(r models.Repository) error { - // assume that the first image is imageWithGalleryPath - sqb := r.Image() - - const imageIdx = 0 - imageID := imageIDs[imageIdx] - image, err := sqb.Find(imageID) - - if err != nil { - t.Errorf("Error finding image: %s", err.Error()) +func loadImageRelationships(ctx context.Context, expected models.Image, actual *models.Image) error { + if expected.GalleryIDs.Loaded() { + if err := actual.LoadGalleryIDs(ctx, db.Image); err != nil { + return err } - - assert.Equal(t, getImageStringValue(imageIdx, "Path"), image.Path) - - imageID = 0 - image, err = sqb.Find(imageID) - - if err != nil { - t.Errorf("Error finding image: %s", err.Error()) + } + if expected.TagIDs.Loaded() { + if err := actual.LoadTagIDs(ctx, db.Image); err != nil { + return err } + } + if expected.PerformerIDs.Loaded() { + if err := actual.LoadPerformerIDs(ctx, db.Image); err != nil { + return err + } + } + if expected.Files.Loaded() { + if err := actual.LoadFiles(ctx, db.Image); err != nil { + return err + } + } - assert.Nil(t, image) + // clear Path, Checksum, PrimaryFileID + if expected.Path == "" { + actual.Path = "" + } + if expected.Checksum == "" { + actual.Checksum = "" + } + if expected.PrimaryFileID == nil { + actual.PrimaryFileID = nil + } - return nil - }) + return nil } -func TestImageFindByPath(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Image() +func Test_imageQueryBuilder_Create(t *testing.T) { + var ( + title = "title" + rating = 3 + ocounter = 5 + createdAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + updatedAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) - const imageIdx = 1 - imagePath := getImageStringValue(imageIdx, "Path") - image, err := sqb.FindByPath(imagePath) + imageFile = makeFileWithID(fileIdxStartImageFiles) + ) - if err != nil { - t.Errorf("Error finding image: %s", err.Error()) - } + tests := []struct { + name string + newObject models.Image + wantErr bool + }{ + { + "full", + models.Image{ + Title: title, + Rating: &rating, + Organized: true, + OCounter: ocounter, + StudioID: &studioIDs[studioIdxWithImage], + CreatedAt: createdAt, + UpdatedAt: updatedAt, + GalleryIDs: 
models.NewRelatedIDs([]int{galleryIDs[galleryIdxWithImage]}), + TagIDs: models.NewRelatedIDs([]int{tagIDs[tagIdx1WithImage], tagIDs[tagIdx1WithDupName]}), + PerformerIDs: models.NewRelatedIDs([]int{performerIDs[performerIdx1WithImage], performerIDs[performerIdx1WithDupName]}), + }, + false, + }, + { + "with file", + models.Image{ + Title: title, + Rating: &rating, + Organized: true, + OCounter: ocounter, + StudioID: &studioIDs[studioIdxWithImage], + Files: models.NewRelatedImageFiles([]*file.ImageFile{ + imageFile.(*file.ImageFile), + }), + PrimaryFileID: &imageFile.Base().ID, + Path: imageFile.Base().Path, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + GalleryIDs: models.NewRelatedIDs([]int{galleryIDs[galleryIdxWithImage]}), + TagIDs: models.NewRelatedIDs([]int{tagIDs[tagIdx1WithImage], tagIDs[tagIdx1WithDupName]}), + PerformerIDs: models.NewRelatedIDs([]int{performerIDs[performerIdx1WithImage], performerIDs[performerIdx1WithDupName]}), + }, + false, + }, + { + "invalid studio id", + models.Image{ + StudioID: &invalidID, + }, + true, + }, + { + "invalid gallery id", + models.Image{ + GalleryIDs: models.NewRelatedIDs([]int{invalidID}), + }, + true, + }, + { + "invalid tag id", + models.Image{ + TagIDs: models.NewRelatedIDs([]int{invalidID}), + }, + true, + }, + { + "invalid performer id", + models.Image{ + PerformerIDs: models.NewRelatedIDs([]int{invalidID}), + }, + true, + }, + } - assert.Equal(t, imageIDs[imageIdx], image.ID) - assert.Equal(t, imagePath, image.Path) + qb := db.Image - imagePath = "not exist" - image, err = sqb.FindByPath(imagePath) + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) - if err != nil { - t.Errorf("Error finding image: %s", err.Error()) - } + var fileIDs []file.ID + if tt.newObject.Files.Loaded() { + for _, f := range tt.newObject.Files.List() { + fileIDs = append(fileIDs, f.ID) + } + } + s := tt.newObject + if err := qb.Create(ctx, &models.ImageCreateInput{ + Image: &s, + FileIDs: fileIDs, + }); (err != nil) != tt.wantErr { + t.Errorf("imageQueryBuilder.Create() error = %v, wantErr = %v", err, tt.wantErr) + } - assert.Nil(t, image) + if tt.wantErr { + assert.Zero(s.ID) + return + } - return nil - }) + assert.NotZero(s.ID) + + copy := tt.newObject + copy.ID = s.ID + + // load relationships + if err := loadImageRelationships(ctx, copy, &s); err != nil { + t.Errorf("loadImageRelationships() error = %v", err) + return + } + + assert.Equal(copy, s) + + // ensure can find the image + found, err := qb.Find(ctx, s.ID) + if err != nil { + t.Errorf("imageQueryBuilder.Find() error = %v", err) + } + + // load relationships + if err := loadImageRelationships(ctx, copy, found); err != nil { + t.Errorf("loadImageRelationships() error = %v", err) + return + } + + assert.Equal(copy, *found) + + return + }) + } } -func TestImageFindByGalleryID(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Image() - - images, err := sqb.FindByGalleryID(galleryIDs[galleryIdxWithTwoImages]) - - if err != nil { - t.Errorf("Error finding images: %s", err.Error()) +func clearImageFileIDs(image *models.Image) { + if image.Files.Loaded() { + for _, f := range image.Files.List() { + f.Base().ID = 0 } + } +} - assert.Len(t, images, 2) - assert.Equal(t, imageIDs[imageIdx1WithGallery], images[0].ID) - assert.Equal(t, imageIDs[imageIdx2WithGallery], images[1].ID) +func makeImageFileWithID(i int) *file.ImageFile { + ret := makeImageFile(i) + ret.ID = imageFileIDs[i] + return ret +} - images, err = 
sqb.FindByGalleryID(galleryIDs[galleryIdxWithScene]) +func Test_imageQueryBuilder_Update(t *testing.T) { + var ( + title = "title" + rating = 3 + ocounter = 5 + createdAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + updatedAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + ) - if err != nil { - t.Errorf("Error finding images: %s", err.Error()) + tests := []struct { + name string + updatedObject *models.Image + wantErr bool + }{ + { + "full", + &models.Image{ + ID: imageIDs[imageIdxWithGallery], + Title: title, + Rating: &rating, + Organized: true, + OCounter: ocounter, + StudioID: &studioIDs[studioIdxWithImage], + CreatedAt: createdAt, + UpdatedAt: updatedAt, + GalleryIDs: models.NewRelatedIDs([]int{galleryIDs[galleryIdxWithImage]}), + TagIDs: models.NewRelatedIDs([]int{tagIDs[tagIdx1WithImage], tagIDs[tagIdx1WithDupName]}), + PerformerIDs: models.NewRelatedIDs([]int{performerIDs[performerIdx1WithImage], performerIDs[performerIdx1WithDupName]}), + }, + false, + }, + { + "clear nullables", + &models.Image{ + ID: imageIDs[imageIdxWithGallery], + GalleryIDs: models.NewRelatedIDs([]int{}), + TagIDs: models.NewRelatedIDs([]int{}), + PerformerIDs: models.NewRelatedIDs([]int{}), + Organized: true, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + false, + }, + { + "clear gallery ids", + &models.Image{ + ID: imageIDs[imageIdxWithGallery], + GalleryIDs: models.NewRelatedIDs([]int{}), + TagIDs: models.NewRelatedIDs([]int{}), + PerformerIDs: models.NewRelatedIDs([]int{}), + Organized: true, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + false, + }, + { + "clear tag ids", + &models.Image{ + ID: imageIDs[imageIdxWithTag], + GalleryIDs: models.NewRelatedIDs([]int{}), + TagIDs: models.NewRelatedIDs([]int{}), + PerformerIDs: models.NewRelatedIDs([]int{}), + Organized: true, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + false, + }, + { + "clear performer ids", + &models.Image{ + ID: imageIDs[imageIdxWithPerformer], + GalleryIDs: models.NewRelatedIDs([]int{}), + TagIDs: models.NewRelatedIDs([]int{}), + PerformerIDs: models.NewRelatedIDs([]int{}), + Organized: true, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + false, + }, + { + "invalid studio id", + &models.Image{ + ID: imageIDs[imageIdxWithGallery], + Organized: true, + StudioID: &invalidID, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + true, + }, + { + "invalid gallery id", + &models.Image{ + ID: imageIDs[imageIdxWithGallery], + Organized: true, + GalleryIDs: models.NewRelatedIDs([]int{invalidID}), + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + true, + }, + { + "invalid tag id", + &models.Image{ + ID: imageIDs[imageIdxWithGallery], + Organized: true, + TagIDs: models.NewRelatedIDs([]int{invalidID}), + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + true, + }, + { + "invalid performer id", + &models.Image{ + ID: imageIDs[imageIdxWithGallery], + Organized: true, + PerformerIDs: models.NewRelatedIDs([]int{invalidID}), + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + true, + }, + } + + qb := db.Image + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + + copy := *tt.updatedObject + + if err := qb.Update(ctx, tt.updatedObject); (err != nil) != tt.wantErr { + t.Errorf("imageQueryBuilder.Update() error = %v, wantErr %v", err, tt.wantErr) + } + + if tt.wantErr { + return + } + + s, err := qb.Find(ctx, tt.updatedObject.ID) + if err != nil { + t.Errorf("imageQueryBuilder.Find() error = %v", err) + } + + // load relationships 
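+			// the fetched image comes back with GalleryIDs, TagIDs, PerformerIDs and Files
+			// unloaded, so resolve whichever relationships the expected object has loaded;
+			// otherwise the assert.Equal below would fail on lazy Related* fields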
+ if err := loadImageRelationships(ctx, copy, s); err != nil { + t.Errorf("loadImageRelationships() error = %v", err) + return + } + + assert.Equal(copy, *s) + + return + }) + } +} + +func clearImagePartial() models.ImagePartial { + // leave mandatory fields + return models.ImagePartial{ + Title: models.OptionalString{Set: true, Null: true}, + Rating: models.OptionalInt{Set: true, Null: true}, + StudioID: models.OptionalInt{Set: true, Null: true}, + GalleryIDs: &models.UpdateIDs{Mode: models.RelationshipUpdateModeSet}, + TagIDs: &models.UpdateIDs{Mode: models.RelationshipUpdateModeSet}, + PerformerIDs: &models.UpdateIDs{Mode: models.RelationshipUpdateModeSet}, + } +} + +func Test_imageQueryBuilder_UpdatePartial(t *testing.T) { + var ( + title = "title" + rating = 3 + ocounter = 5 + createdAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + updatedAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + ) + + tests := []struct { + name string + id int + partial models.ImagePartial + want models.Image + wantErr bool + }{ + { + "full", + imageIDs[imageIdx1WithGallery], + models.ImagePartial{ + Title: models.NewOptionalString(title), + Rating: models.NewOptionalInt(rating), + Organized: models.NewOptionalBool(true), + OCounter: models.NewOptionalInt(ocounter), + StudioID: models.NewOptionalInt(studioIDs[studioIdxWithImage]), + CreatedAt: models.NewOptionalTime(createdAt), + UpdatedAt: models.NewOptionalTime(updatedAt), + GalleryIDs: &models.UpdateIDs{ + IDs: []int{galleryIDs[galleryIdxWithImage]}, + Mode: models.RelationshipUpdateModeSet, + }, + TagIDs: &models.UpdateIDs{ + IDs: []int{tagIDs[tagIdx1WithImage], tagIDs[tagIdx1WithDupName]}, + Mode: models.RelationshipUpdateModeSet, + }, + PerformerIDs: &models.UpdateIDs{ + IDs: []int{performerIDs[performerIdx1WithImage], performerIDs[performerIdx1WithDupName]}, + Mode: models.RelationshipUpdateModeSet, + }, + }, + models.Image{ + ID: imageIDs[imageIdx1WithGallery], + Title: title, + Rating: &rating, + Organized: true, + OCounter: ocounter, + StudioID: &studioIDs[studioIdxWithImage], + Files: models.NewRelatedImageFiles([]*file.ImageFile{ + makeImageFile(imageIdx1WithGallery), + }), + CreatedAt: createdAt, + UpdatedAt: updatedAt, + GalleryIDs: models.NewRelatedIDs([]int{galleryIDs[galleryIdxWithImage]}), + TagIDs: models.NewRelatedIDs([]int{tagIDs[tagIdx1WithImage], tagIDs[tagIdx1WithDupName]}), + PerformerIDs: models.NewRelatedIDs([]int{performerIDs[performerIdx1WithImage], performerIDs[performerIdx1WithDupName]}), + }, + false, + }, + { + "clear all", + imageIDs[imageIdx1WithGallery], + clearImagePartial(), + models.Image{ + ID: imageIDs[imageIdx1WithGallery], + OCounter: getOCounter(imageIdx1WithGallery), + Files: models.NewRelatedImageFiles([]*file.ImageFile{ + makeImageFile(imageIdx1WithGallery), + }), + GalleryIDs: models.NewRelatedIDs([]int{}), + TagIDs: models.NewRelatedIDs([]int{}), + PerformerIDs: models.NewRelatedIDs([]int{}), + }, + false, + }, + { + "invalid id", + invalidID, + models.ImagePartial{}, + models.Image{}, + true, + }, + } + for _, tt := range tests { + qb := db.Image + + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + + got, err := qb.UpdatePartial(ctx, tt.id, tt.partial) + if (err != nil) != tt.wantErr { + t.Errorf("imageQueryBuilder.UpdatePartial() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if tt.wantErr { + return + } + + // load relationships + if err := loadImageRelationships(ctx, tt.want, got); err != nil { + t.Errorf("loadImageRelationships() error = 
%v", err) + return + } + clearImageFileIDs(got) + + assert.Equal(tt.want, *got) + + s, err := qb.Find(ctx, tt.id) + if err != nil { + t.Errorf("imageQueryBuilder.Find() error = %v", err) + } + + // load relationships + if err := loadImageRelationships(ctx, tt.want, s); err != nil { + t.Errorf("loadImageRelationships() error = %v", err) + return + } + clearImageFileIDs(s) + assert.Equal(tt.want, *s) + }) + } +} + +func Test_imageQueryBuilder_UpdatePartialRelationships(t *testing.T) { + tests := []struct { + name string + id int + partial models.ImagePartial + want models.Image + wantErr bool + }{ + { + "add galleries", + imageIDs[imageIdxWithGallery], + models.ImagePartial{ + GalleryIDs: &models.UpdateIDs{ + IDs: []int{galleryIDs[galleryIdx1WithImage], galleryIDs[galleryIdx1WithPerformer]}, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Image{ + GalleryIDs: models.NewRelatedIDs(append(indexesToIDs(galleryIDs, imageGalleries[imageIdxWithGallery]), + galleryIDs[galleryIdx1WithImage], + galleryIDs[galleryIdx1WithPerformer], + )), + }, + false, + }, + { + "add tags", + imageIDs[imageIdxWithTwoTags], + models.ImagePartial{ + TagIDs: &models.UpdateIDs{ + IDs: []int{tagIDs[tagIdx1WithDupName], tagIDs[tagIdx1WithGallery]}, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Image{ + TagIDs: models.NewRelatedIDs(append(indexesToIDs(tagIDs, imageTags[imageIdxWithTwoTags]), + tagIDs[tagIdx1WithDupName], + tagIDs[tagIdx1WithGallery], + )), + }, + false, + }, + { + "add performers", + imageIDs[imageIdxWithTwoPerformers], + models.ImagePartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: []int{performerIDs[performerIdx1WithDupName], performerIDs[performerIdx1WithGallery]}, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Image{ + PerformerIDs: models.NewRelatedIDs(append(indexesToIDs(performerIDs, imagePerformers[imageIdxWithTwoPerformers]), + performerIDs[performerIdx1WithDupName], + performerIDs[performerIdx1WithGallery], + )), + }, + false, + }, + { + "add duplicate galleries", + imageIDs[imageIdxWithGallery], + models.ImagePartial{ + GalleryIDs: &models.UpdateIDs{ + IDs: []int{galleryIDs[galleryIdxWithImage], galleryIDs[galleryIdx1WithPerformer]}, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Image{ + GalleryIDs: models.NewRelatedIDs(append(indexesToIDs(galleryIDs, imageGalleries[imageIdxWithGallery]), + galleryIDs[galleryIdx1WithPerformer], + )), + }, + false, + }, + { + "add duplicate tags", + imageIDs[imageIdxWithTwoTags], + models.ImagePartial{ + TagIDs: &models.UpdateIDs{ + IDs: []int{tagIDs[tagIdx1WithImage], tagIDs[tagIdx1WithGallery]}, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Image{ + TagIDs: models.NewRelatedIDs(append(indexesToIDs(tagIDs, imageTags[imageIdxWithTwoTags]), + tagIDs[tagIdx1WithGallery], + )), + }, + false, + }, + { + "add duplicate performers", + imageIDs[imageIdxWithTwoPerformers], + models.ImagePartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: []int{performerIDs[performerIdx1WithImage], performerIDs[performerIdx1WithGallery]}, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Image{ + PerformerIDs: models.NewRelatedIDs(append(indexesToIDs(performerIDs, imagePerformers[imageIdxWithTwoPerformers]), + performerIDs[performerIdx1WithGallery], + )), + }, + false, + }, + { + "add invalid galleries", + imageIDs[imageIdxWithGallery], + models.ImagePartial{ + GalleryIDs: &models.UpdateIDs{ + IDs: []int{invalidID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Image{}, + true, + }, + { + "add 
invalid tags", + imageIDs[imageIdxWithTwoTags], + models.ImagePartial{ + TagIDs: &models.UpdateIDs{ + IDs: []int{invalidID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Image{}, + true, + }, + { + "add invalid performers", + imageIDs[imageIdxWithTwoPerformers], + models.ImagePartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: []int{invalidID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Image{}, + true, + }, + { + "remove galleries", + imageIDs[imageIdxWithGallery], + models.ImagePartial{ + GalleryIDs: &models.UpdateIDs{ + IDs: []int{galleryIDs[galleryIdxWithImage]}, + Mode: models.RelationshipUpdateModeRemove, + }, + }, + models.Image{ + GalleryIDs: models.NewRelatedIDs([]int{}), + }, + false, + }, + { + "remove tags", + imageIDs[imageIdxWithTwoTags], + models.ImagePartial{ + TagIDs: &models.UpdateIDs{ + IDs: []int{tagIDs[tagIdx1WithImage]}, + Mode: models.RelationshipUpdateModeRemove, + }, + }, + models.Image{ + TagIDs: models.NewRelatedIDs([]int{tagIDs[tagIdx2WithImage]}), + }, + false, + }, + { + "remove performers", + imageIDs[imageIdxWithTwoPerformers], + models.ImagePartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: []int{performerIDs[performerIdx1WithImage]}, + Mode: models.RelationshipUpdateModeRemove, + }, + }, + models.Image{ + PerformerIDs: models.NewRelatedIDs([]int{performerIDs[performerIdx2WithImage]}), + }, + false, + }, + { + "remove unrelated galleries", + imageIDs[imageIdxWithGallery], + models.ImagePartial{ + GalleryIDs: &models.UpdateIDs{ + IDs: []int{galleryIDs[galleryIdx1WithImage]}, + Mode: models.RelationshipUpdateModeRemove, + }, + }, + models.Image{ + GalleryIDs: models.NewRelatedIDs([]int{galleryIDs[galleryIdxWithImage]}), + }, + false, + }, + { + "remove unrelated tags", + imageIDs[imageIdxWithTwoTags], + models.ImagePartial{ + TagIDs: &models.UpdateIDs{ + IDs: []int{tagIDs[tagIdx1WithPerformer]}, + Mode: models.RelationshipUpdateModeRemove, + }, + }, + models.Image{ + TagIDs: models.NewRelatedIDs(indexesToIDs(tagIDs, imageTags[imageIdxWithTwoTags])), + }, + false, + }, + { + "remove unrelated performers", + imageIDs[imageIdxWithTwoPerformers], + models.ImagePartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: []int{performerIDs[performerIdx1WithDupName]}, + Mode: models.RelationshipUpdateModeRemove, + }, + }, + models.Image{ + PerformerIDs: models.NewRelatedIDs(indexesToIDs(performerIDs, imagePerformers[imageIdxWithTwoPerformers])), + }, + false, + }, + } + + for _, tt := range tests { + qb := db.Image + + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + + got, err := qb.UpdatePartial(ctx, tt.id, tt.partial) + if (err != nil) != tt.wantErr { + t.Errorf("imageQueryBuilder.UpdatePartial() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if tt.wantErr { + return + } + + s, err := qb.Find(ctx, tt.id) + if err != nil { + t.Errorf("imageQueryBuilder.Find() error = %v", err) + } + + // load relationships + if err := loadImageRelationships(ctx, tt.want, got); err != nil { + t.Errorf("loadImageRelationships() error = %v", err) + return + } + if err := loadImageRelationships(ctx, tt.want, s); err != nil { + t.Errorf("loadImageRelationships() error = %v", err) + return + } + + // only compare fields that were in the partial + if tt.partial.PerformerIDs != nil { + assert.Equal(tt.want.PerformerIDs, got.PerformerIDs) + assert.Equal(tt.want.PerformerIDs, s.PerformerIDs) + } + if tt.partial.TagIDs != nil { + assert.Equal(tt.want.TagIDs, got.TagIDs) + assert.Equal(tt.want.TagIDs, 
s.TagIDs) + } + if tt.partial.GalleryIDs != nil { + assert.Equal(tt.want.GalleryIDs, got.GalleryIDs) + assert.Equal(tt.want.GalleryIDs, s.GalleryIDs) + } + }) + } +} + +func Test_imageQueryBuilder_IncrementOCounter(t *testing.T) { + tests := []struct { + name string + id int + want int + wantErr bool + }{ + { + "increment", + imageIDs[1], + 2, + false, + }, + { + "invalid", + invalidID, + 0, + true, + }, + } + + qb := db.Image + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + got, err := qb.IncrementOCounter(ctx, tt.id) + if (err != nil) != tt.wantErr { + t.Errorf("imageQueryBuilder.IncrementOCounter() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("imageQueryBuilder.IncrementOCounter() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_imageQueryBuilder_DecrementOCounter(t *testing.T) { + tests := []struct { + name string + id int + want int + wantErr bool + }{ + { + "decrement", + imageIDs[2], + 1, + false, + }, + { + "zero", + imageIDs[0], + 0, + false, + }, + { + "invalid", + invalidID, + 0, + true, + }, + } + + qb := db.Image + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + got, err := qb.DecrementOCounter(ctx, tt.id) + if (err != nil) != tt.wantErr { + t.Errorf("imageQueryBuilder.DecrementOCounter() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("imageQueryBuilder.DecrementOCounter() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_imageQueryBuilder_ResetOCounter(t *testing.T) { + tests := []struct { + name string + id int + want int + wantErr bool + }{ + { + "reset", + imageIDs[2], + 0, + false, + }, + { + "zero", + imageIDs[0], + 0, + false, + }, + { + "invalid", + invalidID, + 0, + true, + }, + } + + qb := db.Image + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + got, err := qb.ResetOCounter(ctx, tt.id) + if (err != nil) != tt.wantErr { + t.Errorf("imageQueryBuilder.ResetOCounter() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("imageQueryBuilder.ResetOCounter() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_imageQueryBuilder_Destroy(t *testing.T) { + tests := []struct { + name string + id int + wantErr bool + }{ + { + "valid", + imageIDs[imageIdxWithGallery], + false, + }, + { + "invalid", + invalidID, + true, + }, + } + + qb := db.Image + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + // runWithRollbackTxn already supplies a transaction context that is + // rolled back afterwards, so Destroy and the follow-up Find use ctx directly + if err := qb.Destroy(ctx, tt.id); (err != nil) != tt.wantErr { + t.Errorf("imageQueryBuilder.Destroy() error = %v, wantErr %v", err, tt.wantErr) + } + + // ensure cannot be found + i, err := qb.Find(ctx, tt.id) + + assert.NotNil(err) + assert.Nil(i) + }) + } +} + +func makeImageWithID(index int) *models.Image { + ret := makeImage(index) + ret.ID = imageIDs[index] + + ret.Files = models.NewRelatedImageFiles([]*file.ImageFile{makeImageFile(index)}) + + return ret +} + +func Test_imageQueryBuilder_Find(t *testing.T) { + tests := []struct { + name string + id int + want *models.Image + wantErr bool + }{ + { + "valid", + imageIDs[imageIdxWithGallery], + makeImageWithID(imageIdxWithGallery), + false, + }, + { + "invalid", + invalidID, + nil, + true, + }, + { + "with performers", + imageIDs[imageIdxWithTwoPerformers], + 
makeImageWithID(imageIdxWithTwoPerformers), + false, + }, + { + "with tags", + imageIDs[imageIdxWithTwoTags], + makeImageWithID(imageIdxWithTwoTags), + false, + }, + } + + qb := db.Image + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + got, err := qb.Find(ctx, tt.id) + if (err != nil) != tt.wantErr { + t.Errorf("imageQueryBuilder.Find() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if got != nil { + // load relationships + if err := loadImageRelationships(ctx, *tt.want, got); err != nil { + t.Errorf("loadImageRelationships() error = %v", err) + return + } + clearImageFileIDs(got) + } + assert.Equal(tt.want, got) + }) + } +} + +func postFindImages(ctx context.Context, want []*models.Image, got []*models.Image) error { + for i, s := range got { + // load relationships + if i < len(want) { + if err := loadImageRelationships(ctx, *want[i], s); err != nil { + return err + } } + clearImageFileIDs(s) + } - assert.Len(t, images, 0) + return nil +} - return nil - }) +func Test_imageQueryBuilder_FindMany(t *testing.T) { + tests := []struct { + name string + ids []int + want []*models.Image + wantErr bool + }{ + { + "valid with relationships", + []int{imageIDs[imageIdxWithGallery], imageIDs[imageIdxWithTwoPerformers], imageIDs[imageIdxWithTwoTags]}, + []*models.Image{ + makeImageWithID(imageIdxWithGallery), + makeImageWithID(imageIdxWithTwoPerformers), + makeImageWithID(imageIdxWithTwoTags), + }, + false, + }, + { + "invalid", + []int{imageIDs[imageIdxWithGallery], imageIDs[imageIdxWithTwoPerformers], invalidID}, + nil, + true, + }, + } + + qb := db.Image + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + got, err := qb.FindMany(ctx, tt.ids) + if (err != nil) != tt.wantErr { + t.Errorf("imageQueryBuilder.FindMany() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if err := postFindImages(ctx, tt.want, got); err != nil { + t.Errorf("loadImageRelationships() error = %v", err) + return + } + + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("imageQueryBuilder.FindMany() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_imageQueryBuilder_FindByChecksum(t *testing.T) { + getChecksum := func(index int) string { + return getImageStringValue(index, checksumField) + } + + tests := []struct { + name string + checksum string + want []*models.Image + wantErr bool + }{ + { + "valid", + getChecksum(imageIdxWithGallery), + []*models.Image{makeImageWithID(imageIdxWithGallery)}, + false, + }, + { + "invalid", + "invalid checksum", + nil, + false, + }, + { + "with performers", + getChecksum(imageIdxWithTwoPerformers), + []*models.Image{makeImageWithID(imageIdxWithTwoPerformers)}, + false, + }, + { + "with tags", + getChecksum(imageIdxWithTwoTags), + []*models.Image{makeImageWithID(imageIdxWithTwoTags)}, + false, + }, + } + + qb := db.Image + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + got, err := qb.FindByChecksum(ctx, tt.checksum) + if (err != nil) != tt.wantErr { + t.Errorf("imageQueryBuilder.FindByChecksum() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if err := postFindImages(ctx, tt.want, got); err != nil { + t.Errorf("loadImageRelationships() error = %v", err) + return + } + + assert.Equal(tt.want, got) + }) + } +} + +func Test_imageQueryBuilder_FindByFingerprints(t *testing.T) { + getChecksum := func(index int) string { + return 
getImageStringValue(index, checksumField) + } + + tests := []struct { + name string + fingerprints []file.Fingerprint + want []*models.Image + wantErr bool + }{ + { + "valid", + []file.Fingerprint{ + { + Type: file.FingerprintTypeMD5, + Fingerprint: getChecksum(imageIdxWithGallery), + }, + }, + []*models.Image{makeImageWithID(imageIdxWithGallery)}, + false, + }, + { + "invalid", + []file.Fingerprint{ + { + Type: file.FingerprintTypeMD5, + Fingerprint: "invalid checksum", + }, + }, + nil, + false, + }, + { + "with performers", + []file.Fingerprint{ + { + Type: file.FingerprintTypeMD5, + Fingerprint: getChecksum(imageIdxWithTwoPerformers), + }, + }, + []*models.Image{makeImageWithID(imageIdxWithTwoPerformers)}, + false, + }, + { + "with tags", + []file.Fingerprint{ + { + Type: file.FingerprintTypeMD5, + Fingerprint: getChecksum(imageIdxWithTwoTags), + }, + }, + []*models.Image{makeImageWithID(imageIdxWithTwoTags)}, + false, + }, + } + + qb := db.Image + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + got, err := qb.FindByFingerprints(ctx, tt.fingerprints) + if (err != nil) != tt.wantErr { + t.Errorf("imageQueryBuilder.FindByFingerprints() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if err := postFindImages(ctx, tt.want, got); err != nil { + t.Errorf("loadImageRelationships() error = %v", err) + return + } + + assert.Equal(tt.want, got) + }) + } +} + +func Test_imageQueryBuilder_FindByGalleryID(t *testing.T) { + tests := []struct { + name string + galleryID int + want []*models.Image + wantErr bool + }{ + { + "valid", + galleryIDs[galleryIdxWithTwoImages], + []*models.Image{makeImageWithID(imageIdx1WithGallery), makeImageWithID(imageIdx2WithGallery)}, + false, + }, + { + "none", + galleryIDs[galleryIdx1WithPerformer], + nil, + false, + }, + } + + qb := db.Image + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + got, err := qb.FindByGalleryID(ctx, tt.galleryID) + if (err != nil) != tt.wantErr { + t.Errorf("imageQueryBuilder.FindByGalleryID() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if err := postFindImages(ctx, tt.want, got); err != nil { + t.Errorf("loadImageRelationships() error = %v", err) + return + } + + assert.Equal(tt.want, got) + return + }) + } +} + +func Test_imageQueryBuilder_CountByGalleryID(t *testing.T) { + tests := []struct { + name string + galleryID int + want int + wantErr bool + }{ + { + "valid", + galleryIDs[galleryIdxWithTwoImages], + 2, + false, + }, + { + "none", + galleryIDs[galleryIdx1WithPerformer], + 0, + false, + }, + } + + qb := db.Image + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + got, err := qb.CountByGalleryID(ctx, tt.galleryID) + if (err != nil) != tt.wantErr { + t.Errorf("imageQueryBuilder.CountByGalleryID() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("imageQueryBuilder.CountByGalleryID() = %v, want %v", got, tt.want) + } + }) + } +} + +func imagesToIDs(i []*models.Image) []int { + var ret []int + for _, ii := range i { + ret = append(ret, ii.ID) + } + + return ret +} + +func Test_imageStore_FindByFileID(t *testing.T) { + tests := []struct { + name string + fileID file.ID + include []int + exclude []int + }{ + { + "valid", + imageFileIDs[imageIdxWithGallery], + []int{imageIdxWithGallery}, + nil, + }, + { + "invalid", + invalidFileID, + nil, + 
[]int{imageIdxWithGallery}, + }, + } + + qb := db.Image + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + got, err := qb.FindByFileID(ctx, tt.fileID) + if err != nil { + t.Errorf("ImageStore.FindByFileID() error = %v", err) + return + } + for _, f := range got { + clearImageFileIDs(f) + } + + ids := imagesToIDs(got) + include := indexesToIDs(imageIDs, tt.include) + exclude := indexesToIDs(imageIDs, tt.exclude) + + for _, i := range include { + assert.Contains(ids, i) + } + for _, e := range exclude { + assert.NotContains(ids, e) + } + }) + } +} + +func Test_imageStore_FindByFolderID(t *testing.T) { + tests := []struct { + name string + folderID file.FolderID + include []int + exclude []int + }{ + { + "valid", + folderIDs[folderIdxWithImageFiles], + []int{imageIdxWithGallery}, + nil, + }, + { + "invalid", + invalidFolderID, + nil, + []int{imageIdxWithGallery}, + }, + { + "parent folder", + folderIDs[folderIdxForObjectFiles], + nil, + []int{imageIdxWithGallery}, + }, + } + + qb := db.Image + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + got, err := qb.FindByFolderID(ctx, tt.folderID) + if err != nil { + t.Errorf("ImageStore.FindByFolderID() error = %v", err) + return + } + for _, f := range got { + clearImageFileIDs(f) + } + + ids := imagesToIDs(got) + include := indexesToIDs(imageIDs, tt.include) + exclude := indexesToIDs(imageIDs, tt.exclude) + + for _, i := range include { + assert.Contains(ids, i) + } + for _, e := range exclude { + assert.NotContains(ids, e) + } + }) + } +} + +func Test_imageStore_FindByZipFileID(t *testing.T) { + tests := []struct { + name string + zipFileID file.ID + include []int + exclude []int + }{ + { + "valid", + fileIDs[fileIdxZip], + []int{imageIdxInZip}, + nil, + }, + { + "invalid", + invalidFileID, + nil, + []int{imageIdxInZip}, + }, + } + + qb := db.Image + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + got, err := qb.FindByZipFileID(ctx, tt.zipFileID) + if err != nil { + t.Errorf("ImageStore.FindByZipFileID() error = %v", err) + return + } + for _, f := range got { + clearImageFileIDs(f) + } + + ids := imagesToIDs(got) + include := indexesToIDs(imageIDs, tt.include) + exclude := indexesToIDs(imageIDs, tt.exclude) + + for _, i := range include { + assert.Contains(ids, i) + } + for _, e := range exclude { + assert.NotContains(ids, e) + } + }) + } } func TestImageQueryQ(t *testing.T) { - withTxn(func(r models.Repository) error { + withTxn(func(ctx context.Context) error { const imageIdx = 2 q := getImageStringValue(imageIdx, titleField) - sqb := r.Image() + sqb := db.Image - imageQueryQ(t, sqb, q, imageIdx) + imageQueryQ(ctx, t, sqb, q, imageIdx) return nil }) } -func queryImagesWithCount(sqb models.ImageReader, imageFilter *models.ImageFilterType, findFilter *models.FindFilterType) ([]*models.Image, int, error) { - result, err := sqb.Query(models.ImageQueryOptions{ +func queryImagesWithCount(ctx context.Context, sqb models.ImageReader, imageFilter *models.ImageFilterType, findFilter *models.FindFilterType) ([]*models.Image, int, error) { + result, err := sqb.Query(ctx, models.ImageQueryOptions{ QueryOptions: models.QueryOptions{ FindFilter: findFilter, Count: true, @@ -121,7 +1472,7 @@ func queryImagesWithCount(sqb models.ImageReader, imageFilter *models.ImageFilte return nil, 0, err } - images, err := 
result.Resolve() + images, err := result.Resolve(ctx) if err != nil { return nil, 0, err } @@ -129,17 +1480,17 @@ func queryImagesWithCount(sqb models.ImageReader, imageFilter *models.ImageFilte return images, result.Count, nil } -func imageQueryQ(t *testing.T, sqb models.ImageReader, q string, expectedImageIdx int) { +func imageQueryQ(ctx context.Context, t *testing.T, sqb models.ImageReader, q string, expectedImageIdx int) { filter := models.FindFilterType{ Q: &q, } - images := queryImages(t, sqb, nil, &filter) + images := queryImages(ctx, t, sqb, nil, &filter) assert.Len(t, images, 1) image := images[0] assert.Equal(t, imageIDs[expectedImageIdx], image.ID) - count, err := sqb.QueryCount(nil, &filter) + count, err := sqb.QueryCount(ctx, nil, &filter) if err != nil { t.Errorf("Error querying image: %s", err.Error()) } @@ -147,14 +1498,14 @@ func imageQueryQ(t *testing.T, sqb models.ImageReader, q string, expectedImageId // no Q should return all results filter.Q = nil - images = queryImages(t, sqb, nil, &filter) + images = queryImages(ctx, t, sqb, nil, &filter) assert.Len(t, images, totalImages) } func TestImageQueryPath(t *testing.T) { const imageIdx = 1 - imagePath := getImageStringValue(imageIdx, "Path") + imagePath := getFilePath(folderIdxWithImageFiles, getImageBasename(imageIdx)) pathCriterion := models.StringCriterionInput{ Value: imagePath, @@ -175,13 +1526,13 @@ func TestImageQueryPath(t *testing.T) { } func verifyImagePath(t *testing.T, pathCriterion models.StringCriterionInput, expected int) { - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image imageFilter := models.ImageFilterType{ Path: &pathCriterion, } - images := queryImages(t, sqb, &imageFilter, nil) + images := queryImages(ctx, t, sqb, &imageFilter, nil) assert.Equal(t, expected, len(images), "number of returned images") @@ -197,8 +1548,8 @@ func TestImageQueryPathOr(t *testing.T) { const image1Idx = 1 const image2Idx = 2 - image1Path := getImageStringValue(image1Idx, "Path") - image2Path := getImageStringValue(image2Idx, "Path") + image1Path := getFilePath(folderIdxWithImageFiles, getImageBasename(image1Idx)) + image2Path := getFilePath(folderIdxWithImageFiles, getImageBasename(image2Idx)) imageFilter := models.ImageFilterType{ Path: &models.StringCriterionInput{ @@ -213,12 +1564,15 @@ func TestImageQueryPathOr(t *testing.T) { }, } - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image - images := queryImages(t, sqb, &imageFilter, nil) + images := queryImages(ctx, t, sqb, &imageFilter, nil) + + if !assert.Len(t, images, 2) { + return nil + } - assert.Len(t, images, 2) assert.Equal(t, image1Path, images[0].Path) assert.Equal(t, image2Path, images[1].Path) @@ -228,7 +1582,7 @@ func TestImageQueryPathOr(t *testing.T) { func TestImageQueryPathAndRating(t *testing.T) { const imageIdx = 1 - imagePath := getImageStringValue(imageIdx, "Path") + imagePath := getFilePath(folderIdxWithImageFiles, getImageBasename(imageIdx)) imageRating := getRating(imageIdx) imageFilter := models.ImageFilterType{ @@ -244,14 +1598,14 @@ func TestImageQueryPathAndRating(t *testing.T) { }, } - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image - images := queryImages(t, sqb, &imageFilter, nil) + images := queryImages(ctx, t, sqb, &imageFilter, nil) assert.Len(t, images, 1) assert.Equal(t, imagePath, images[0].Path) - assert.Equal(t, 
imageRating.Int64, images[0].Rating.Int64) + assert.Equal(t, int(imageRating.Int64), *images[0].Rating) return nil }) @@ -279,15 +1633,15 @@ func TestImageQueryPathNotRating(t *testing.T) { }, } - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image - images := queryImages(t, sqb, &imageFilter, nil) + images := queryImages(ctx, t, sqb, &imageFilter, nil) for _, image := range images { verifyString(t, image.Path, pathCriterion) ratingCriterion.Modifier = models.CriterionModifierNotEquals - verifyInt64(t, image.Rating, ratingCriterion) + verifyIntPtr(t, image.Rating, ratingCriterion) } return nil @@ -310,20 +1664,20 @@ func TestImageIllegalQuery(t *testing.T) { Or: &subFilter, } - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image - _, _, err := queryImagesWithCount(sqb, imageFilter, nil) + _, _, err := queryImagesWithCount(ctx, sqb, imageFilter, nil) assert.NotNil(err) imageFilter.Or = nil imageFilter.Not = &subFilter - _, _, err = queryImagesWithCount(sqb, imageFilter, nil) + _, _, err = queryImagesWithCount(ctx, sqb, imageFilter, nil) assert.NotNil(err) imageFilter.And = nil imageFilter.Or = &subFilter - _, _, err = queryImagesWithCount(sqb, imageFilter, nil) + _, _, err = queryImagesWithCount(ctx, sqb, imageFilter, nil) assert.NotNil(err) return nil @@ -356,19 +1710,19 @@ func TestImageQueryRating(t *testing.T) { } func verifyImagesRating(t *testing.T, ratingCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image imageFilter := models.ImageFilterType{ Rating: &ratingCriterion, } - images, _, err := queryImagesWithCount(sqb, &imageFilter, nil) + images, _, err := queryImagesWithCount(ctx, sqb, &imageFilter, nil) if err != nil { t.Errorf("Error querying image: %s", err.Error()) } for _, image := range images { - verifyInt64(t, image.Rating, ratingCriterion) + verifyIntPtr(t, image.Rating, ratingCriterion) } return nil @@ -395,13 +1749,13 @@ func TestImageQueryOCounter(t *testing.T) { } func verifyImagesOCounter(t *testing.T, oCounterCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image imageFilter := models.ImageFilterType{ OCounter: &oCounterCriterion, } - images, _, err := queryImagesWithCount(sqb, &imageFilter, nil) + images, _, err := queryImagesWithCount(ctx, sqb, &imageFilter, nil) if err != nil { t.Errorf("Error querying image: %s", err.Error()) } @@ -424,8 +1778,8 @@ func TestImageQueryResolution(t *testing.T) { } func verifyImagesResolution(t *testing.T, resolution models.ResolutionEnum) { - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image imageFilter := models.ImageFilterType{ Resolution: &models.ResolutionCriterionInput{ Value: resolution, @@ -433,40 +1787,48 @@ func verifyImagesResolution(t *testing.T, resolution models.ResolutionEnum) { }, } - images, _, err := queryImagesWithCount(sqb, &imageFilter, nil) + images, _, err := queryImagesWithCount(ctx, sqb, &imageFilter, nil) if err != nil { t.Errorf("Error querying image: %s", err.Error()) } for _, image := range images { - verifyImageResolution(t, image.Height, resolution) + if err := image.LoadPrimaryFile(ctx, db.File); err != nil { + t.Errorf("Error loading primary file: %s", err.Error()) + return nil 
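        // Under the new file-based schema an image's dimensions live on its
        // primary file rather than on the image row itself, which is why the
        // relationship must be loaded explicitly before Height can be read.
        // A minimal sketch of the access pattern, using the same calls as this
        // test; the nil guard on Primary() is an added precaution, not
        // something this patch requires:
        //
        //	if err := img.LoadPrimaryFile(ctx, db.File); err != nil {
        //		return err
        //	}
        //	if f := img.Files.Primary(); f != nil {
        //		useResolution(f.Width, f.Height) // useResolution is hypothetical
        //	}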
+ } + + verifyImageResolution(t, image.Files.Primary().Height, resolution) } return nil }) } -func verifyImageResolution(t *testing.T, height sql.NullInt64, resolution models.ResolutionEnum) { +func verifyImageResolution(t *testing.T, height int, resolution models.ResolutionEnum) { + if !resolution.IsValid() { + return + } + assert := assert.New(t) - h := height.Int64 switch resolution { case models.ResolutionEnumLow: - assert.True(h < 480) + assert.True(height < 480) case models.ResolutionEnumStandard: - assert.True(h >= 480 && h < 720) + assert.True(height >= 480 && height < 720) case models.ResolutionEnumStandardHd: - assert.True(h >= 720 && h < 1080) + assert.True(height >= 720 && height < 1080) case models.ResolutionEnumFullHd: - assert.True(h >= 1080 && h < 2160) + assert.True(height >= 1080 && height < 2160) case models.ResolutionEnumFourK: - assert.True(h >= 2160) + assert.True(height >= 2160) } } func TestImageQueryIsMissingGalleries(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image isMissing := "galleries" imageFilter := models.ImageFilterType{ IsMissing: &isMissing, @@ -477,7 +1839,7 @@ func TestImageQueryIsMissingGalleries(t *testing.T) { Q: &q, } - images, _, err := queryImagesWithCount(sqb, &imageFilter, &findFilter) + images, _, err := queryImagesWithCount(ctx, sqb, &imageFilter, &findFilter) if err != nil { t.Errorf("Error querying image: %s", err.Error()) } @@ -485,7 +1847,7 @@ func TestImageQueryIsMissingGalleries(t *testing.T) { assert.Len(t, images, 0) findFilter.Q = nil - images, _, err = queryImagesWithCount(sqb, &imageFilter, &findFilter) + images, _, err = queryImagesWithCount(ctx, sqb, &imageFilter, &findFilter) if err != nil { t.Errorf("Error querying image: %s", err.Error()) } @@ -502,8 +1864,8 @@ func TestImageQueryIsMissingGalleries(t *testing.T) { } func TestImageQueryIsMissingStudio(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image isMissing := "studio" imageFilter := models.ImageFilterType{ IsMissing: &isMissing, @@ -514,7 +1876,7 @@ func TestImageQueryIsMissingStudio(t *testing.T) { Q: &q, } - images, _, err := queryImagesWithCount(sqb, &imageFilter, &findFilter) + images, _, err := queryImagesWithCount(ctx, sqb, &imageFilter, &findFilter) if err != nil { t.Errorf("Error querying image: %s", err.Error()) } @@ -522,7 +1884,7 @@ func TestImageQueryIsMissingStudio(t *testing.T) { assert.Len(t, images, 0) findFilter.Q = nil - images, _, err = queryImagesWithCount(sqb, &imageFilter, &findFilter) + images, _, err = queryImagesWithCount(ctx, sqb, &imageFilter, &findFilter) if err != nil { t.Errorf("Error querying image: %s", err.Error()) } @@ -537,8 +1899,8 @@ func TestImageQueryIsMissingStudio(t *testing.T) { } func TestImageQueryIsMissingPerformers(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image isMissing := "performers" imageFilter := models.ImageFilterType{ IsMissing: &isMissing, @@ -549,7 +1911,7 @@ func TestImageQueryIsMissingPerformers(t *testing.T) { Q: &q, } - images, _, err := queryImagesWithCount(sqb, &imageFilter, &findFilter) + images, _, err := queryImagesWithCount(ctx, sqb, &imageFilter, &findFilter) if err != nil { t.Errorf("Error querying image: %s", err.Error()) } @@ -557,7 +1919,7 @@ func TestImageQueryIsMissingPerformers(t *testing.T) { assert.Len(t, images, 0) findFilter.Q 
= nil - images, _, err = queryImagesWithCount(sqb, &imageFilter, &findFilter) + images, _, err = queryImagesWithCount(ctx, sqb, &imageFilter, &findFilter) if err != nil { t.Errorf("Error querying image: %s", err.Error()) } @@ -574,8 +1936,8 @@ func TestImageQueryIsMissingPerformers(t *testing.T) { } func TestImageQueryIsMissingTags(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image isMissing := "tags" imageFilter := models.ImageFilterType{ IsMissing: &isMissing, @@ -586,7 +1948,7 @@ func TestImageQueryIsMissingTags(t *testing.T) { Q: &q, } - images, _, err := queryImagesWithCount(sqb, &imageFilter, &findFilter) + images, _, err := queryImagesWithCount(ctx, sqb, &imageFilter, &findFilter) if err != nil { t.Errorf("Error querying image: %s", err.Error()) } @@ -594,7 +1956,7 @@ func TestImageQueryIsMissingTags(t *testing.T) { assert.Len(t, images, 0) findFilter.Q = nil - images, _, err = queryImagesWithCount(sqb, &imageFilter, &findFilter) + images, _, err = queryImagesWithCount(ctx, sqb, &imageFilter, &findFilter) if err != nil { t.Errorf("Error querying image: %s", err.Error()) } @@ -606,23 +1968,23 @@ func TestImageQueryIsMissingTags(t *testing.T) { } func TestImageQueryIsMissingRating(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image isMissing := "rating" imageFilter := models.ImageFilterType{ IsMissing: &isMissing, } - images, _, err := queryImagesWithCount(sqb, &imageFilter, nil) + images, _, err := queryImagesWithCount(ctx, sqb, &imageFilter, nil) if err != nil { t.Errorf("Error querying image: %s", err.Error()) } assert.True(t, len(images) > 0) - // ensure date is null, empty or "0001-01-01" + // ensure rating is null for _, image := range images { - assert.True(t, !image.Rating.Valid) + assert.Nil(t, image.Rating) } return nil @@ -630,8 +1992,8 @@ func TestImageQueryIsMissingRating(t *testing.T) { } func TestImageQueryGallery(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image galleryCriterion := models.MultiCriterionInput{ Value: []string{ strconv.Itoa(galleryIDs[galleryIdxWithImage]), @@ -643,7 +2005,7 @@ func TestImageQueryGallery(t *testing.T) { Galleries: &galleryCriterion, } - images := queryImages(t, sqb, &imageFilter, nil) + images := queryImages(ctx, t, sqb, &imageFilter, nil) assert.Len(t, images, 1) // ensure ids are correct @@ -659,7 +2021,7 @@ func TestImageQueryGallery(t *testing.T) { Modifier: models.CriterionModifierIncludesAll, } - images = queryImages(t, sqb, &imageFilter, nil) + images = queryImages(ctx, t, sqb, &imageFilter, nil) assert.Len(t, images, 1) assert.Equal(t, imageIDs[imageIdxWithTwoGalleries], images[0].ID) @@ -676,11 +2038,11 @@ func TestImageQueryGallery(t *testing.T) { Q: &q, } - images = queryImages(t, sqb, &imageFilter, &findFilter) + images = queryImages(ctx, t, sqb, &imageFilter, &findFilter) assert.Len(t, images, 0) q = getImageStringValue(imageIdxWithPerformer, titleField) - images = queryImages(t, sqb, &imageFilter, &findFilter) + images = queryImages(ctx, t, sqb, &imageFilter, &findFilter) assert.Len(t, images, 1) return nil @@ -688,8 +2050,8 @@ func TestImageQueryGallery(t *testing.T) { } func TestImageQueryPerformers(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image performerCriterion 
:= models.MultiCriterionInput{ Value: []string{ strconv.Itoa(performerIDs[performerIdxWithImage]), @@ -702,7 +2064,7 @@ func TestImageQueryPerformers(t *testing.T) { Performers: &performerCriterion, } - images := queryImages(t, sqb, &imageFilter, nil) + images := queryImages(ctx, t, sqb, &imageFilter, nil) assert.Len(t, images, 2) // ensure ids are correct @@ -718,7 +2080,7 @@ func TestImageQueryPerformers(t *testing.T) { Modifier: models.CriterionModifierIncludesAll, } - images = queryImages(t, sqb, &imageFilter, nil) + images = queryImages(ctx, t, sqb, &imageFilter, nil) assert.Len(t, images, 1) assert.Equal(t, imageIDs[imageIdxWithTwoPerformers], images[0].ID) @@ -734,7 +2096,7 @@ func TestImageQueryPerformers(t *testing.T) { Q: &q, } - images = queryImages(t, sqb, &imageFilter, &findFilter) + images = queryImages(ctx, t, sqb, &imageFilter, &findFilter) assert.Len(t, images, 0) performerCriterion = models.MultiCriterionInput{ @@ -742,22 +2104,22 @@ func TestImageQueryPerformers(t *testing.T) { } q = getImageStringValue(imageIdxWithGallery, titleField) - images = queryImages(t, sqb, &imageFilter, &findFilter) + images = queryImages(ctx, t, sqb, &imageFilter, &findFilter) assert.Len(t, images, 1) assert.Equal(t, imageIDs[imageIdxWithGallery], images[0].ID) q = getImageStringValue(imageIdxWithPerformerTag, titleField) - images = queryImages(t, sqb, &imageFilter, &findFilter) + images = queryImages(ctx, t, sqb, &imageFilter, &findFilter) assert.Len(t, images, 0) performerCriterion.Modifier = models.CriterionModifierNotNull - images = queryImages(t, sqb, &imageFilter, &findFilter) + images = queryImages(ctx, t, sqb, &imageFilter, &findFilter) assert.Len(t, images, 1) assert.Equal(t, imageIDs[imageIdxWithPerformerTag], images[0].ID) q = getImageStringValue(imageIdxWithGallery, titleField) - images = queryImages(t, sqb, &imageFilter, &findFilter) + images = queryImages(ctx, t, sqb, &imageFilter, &findFilter) assert.Len(t, images, 0) return nil @@ -765,8 +2127,8 @@ func TestImageQueryPerformers(t *testing.T) { } func TestImageQueryTags(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image tagCriterion := models.HierarchicalMultiCriterionInput{ Value: []string{ strconv.Itoa(tagIDs[tagIdxWithImage]), @@ -779,7 +2141,7 @@ func TestImageQueryTags(t *testing.T) { Tags: &tagCriterion, } - images := queryImages(t, sqb, &imageFilter, nil) + images := queryImages(ctx, t, sqb, &imageFilter, nil) assert.Len(t, images, 2) // ensure ids are correct @@ -795,7 +2157,7 @@ func TestImageQueryTags(t *testing.T) { Modifier: models.CriterionModifierIncludesAll, } - images = queryImages(t, sqb, &imageFilter, nil) + images = queryImages(ctx, t, sqb, &imageFilter, nil) assert.Len(t, images, 1) assert.Equal(t, imageIDs[imageIdxWithTwoTags], images[0].ID) @@ -811,7 +2173,7 @@ func TestImageQueryTags(t *testing.T) { Q: &q, } - images = queryImages(t, sqb, &imageFilter, &findFilter) + images = queryImages(ctx, t, sqb, &imageFilter, &findFilter) assert.Len(t, images, 0) tagCriterion = models.HierarchicalMultiCriterionInput{ @@ -819,22 +2181,22 @@ func TestImageQueryTags(t *testing.T) { } q = getImageStringValue(imageIdxWithGallery, titleField) - images = queryImages(t, sqb, &imageFilter, &findFilter) + images = queryImages(ctx, t, sqb, &imageFilter, &findFilter) assert.Len(t, images, 1) assert.Equal(t, imageIDs[imageIdxWithGallery], images[0].ID) q = getImageStringValue(imageIdxWithTag, titleField) - images = queryImages(t, sqb, 
&imageFilter, &findFilter) + images = queryImages(ctx, t, sqb, &imageFilter, &findFilter) assert.Len(t, images, 0) tagCriterion.Modifier = models.CriterionModifierNotNull - images = queryImages(t, sqb, &imageFilter, &findFilter) + images = queryImages(ctx, t, sqb, &imageFilter, &findFilter) assert.Len(t, images, 1) assert.Equal(t, imageIDs[imageIdxWithTag], images[0].ID) q = getImageStringValue(imageIdxWithGallery, titleField) - images = queryImages(t, sqb, &imageFilter, &findFilter) + images = queryImages(ctx, t, sqb, &imageFilter, &findFilter) assert.Len(t, images, 0) return nil @@ -842,8 +2204,8 @@ func TestImageQueryTags(t *testing.T) { } func TestImageQueryStudio(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image studioCriterion := models.HierarchicalMultiCriterionInput{ Value: []string{ strconv.Itoa(studioIDs[studioIdxWithImage]), @@ -855,7 +2217,7 @@ func TestImageQueryStudio(t *testing.T) { Studios: &studioCriterion, } - images, _, err := queryImagesWithCount(sqb, &imageFilter, nil) + images, _, err := queryImagesWithCount(ctx, sqb, &imageFilter, nil) if err != nil { t.Errorf("Error querying image: %s", err.Error()) } @@ -877,7 +2239,7 @@ func TestImageQueryStudio(t *testing.T) { Q: &q, } - images, _, err = queryImagesWithCount(sqb, &imageFilter, &findFilter) + images, _, err = queryImagesWithCount(ctx, sqb, &imageFilter, &findFilter) if err != nil { t.Errorf("Error querying image: %s", err.Error()) } @@ -888,8 +2250,8 @@ func TestImageQueryStudio(t *testing.T) { } func TestImageQueryStudioDepth(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image depth := 2 studioCriterion := models.HierarchicalMultiCriterionInput{ Value: []string{ @@ -903,16 +2265,16 @@ func TestImageQueryStudioDepth(t *testing.T) { Studios: &studioCriterion, } - images := queryImages(t, sqb, &imageFilter, nil) + images := queryImages(ctx, t, sqb, &imageFilter, nil) assert.Len(t, images, 1) depth = 1 - images = queryImages(t, sqb, &imageFilter, nil) + images = queryImages(ctx, t, sqb, &imageFilter, nil) assert.Len(t, images, 0) studioCriterion.Value = []string{strconv.Itoa(studioIDs[studioIdxWithParentAndChild])} - images = queryImages(t, sqb, &imageFilter, nil) + images = queryImages(ctx, t, sqb, &imageFilter, nil) assert.Len(t, images, 1) // ensure id is correct @@ -933,23 +2295,23 @@ func TestImageQueryStudioDepth(t *testing.T) { Q: &q, } - images = queryImages(t, sqb, &imageFilter, &findFilter) + images = queryImages(ctx, t, sqb, &imageFilter, &findFilter) assert.Len(t, images, 0) depth = 1 - images = queryImages(t, sqb, &imageFilter, &findFilter) + images = queryImages(ctx, t, sqb, &imageFilter, &findFilter) assert.Len(t, images, 1) studioCriterion.Value = []string{strconv.Itoa(studioIDs[studioIdxWithParentAndChild])} - images = queryImages(t, sqb, &imageFilter, &findFilter) + images = queryImages(ctx, t, sqb, &imageFilter, &findFilter) assert.Len(t, images, 0) return nil }) } -func queryImages(t *testing.T, sqb models.ImageReader, imageFilter *models.ImageFilterType, findFilter *models.FindFilterType) []*models.Image { - images, _, err := queryImagesWithCount(sqb, imageFilter, findFilter) +func queryImages(ctx context.Context, t *testing.T, sqb models.ImageReader, imageFilter *models.ImageFilterType, findFilter *models.FindFilterType) []*models.Image { + images, _, err := queryImagesWithCount(ctx, sqb, imageFilter, findFilter) if 
err != nil { t.Errorf("Error querying images: %s", err.Error()) } @@ -958,8 +2320,8 @@ func queryImages(t *testing.T, sqb models.ImageReader, imageFilter *models.Image } func TestImageQueryPerformerTags(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image tagCriterion := models.HierarchicalMultiCriterionInput{ Value: []string{ strconv.Itoa(tagIDs[tagIdxWithPerformer]), @@ -972,7 +2334,7 @@ func TestImageQueryPerformerTags(t *testing.T) { PerformerTags: &tagCriterion, } - images := queryImages(t, sqb, &imageFilter, nil) + images := queryImages(ctx, t, sqb, &imageFilter, nil) assert.Len(t, images, 2) // ensure ids are correct @@ -988,7 +2350,7 @@ func TestImageQueryPerformerTags(t *testing.T) { Modifier: models.CriterionModifierIncludesAll, } - images = queryImages(t, sqb, &imageFilter, nil) + images = queryImages(ctx, t, sqb, &imageFilter, nil) assert.Len(t, images, 1) assert.Equal(t, imageIDs[imageIdxWithPerformerTwoTags], images[0].ID) @@ -1005,7 +2367,7 @@ func TestImageQueryPerformerTags(t *testing.T) { Q: &q, } - images = queryImages(t, sqb, &imageFilter, &findFilter) + images = queryImages(ctx, t, sqb, &imageFilter, &findFilter) assert.Len(t, images, 0) tagCriterion = models.HierarchicalMultiCriterionInput{ @@ -1013,22 +2375,22 @@ func TestImageQueryPerformerTags(t *testing.T) { } q = getImageStringValue(imageIdxWithGallery, titleField) - images = queryImages(t, sqb, &imageFilter, &findFilter) + images = queryImages(ctx, t, sqb, &imageFilter, &findFilter) assert.Len(t, images, 1) assert.Equal(t, imageIDs[imageIdxWithGallery], images[0].ID) q = getImageStringValue(imageIdxWithPerformerTag, titleField) - images = queryImages(t, sqb, &imageFilter, &findFilter) + images = queryImages(ctx, t, sqb, &imageFilter, &findFilter) assert.Len(t, images, 0) tagCriterion.Modifier = models.CriterionModifierNotNull - images = queryImages(t, sqb, &imageFilter, &findFilter) + images = queryImages(ctx, t, sqb, &imageFilter, &findFilter) assert.Len(t, images, 1) assert.Equal(t, imageIDs[imageIdxWithPerformerTag], images[0].ID) q = getImageStringValue(imageIdxWithGallery, titleField) - images = queryImages(t, sqb, &imageFilter, &findFilter) + images = queryImages(ctx, t, sqb, &imageFilter, &findFilter) assert.Len(t, images, 0) return nil @@ -1055,17 +2417,17 @@ func TestImageQueryTagCount(t *testing.T) { } func verifyImagesTagCount(t *testing.T, tagCountCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image imageFilter := models.ImageFilterType{ TagCount: &tagCountCriterion, } - images := queryImages(t, sqb, &imageFilter, nil) + images := queryImages(ctx, t, sqb, &imageFilter, nil) assert.Greater(t, len(images), 0) for _, image := range images { - ids, err := sqb.GetTagIDs(image.ID) + ids, err := sqb.GetTagIDs(ctx, image.ID) if err != nil { return err } @@ -1096,17 +2458,17 @@ func TestImageQueryPerformerCount(t *testing.T) { } func verifyImagesPerformerCount(t *testing.T, performerCountCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - sqb := r.Image() + withTxn(func(ctx context.Context) error { + sqb := db.Image imageFilter := models.ImageFilterType{ PerformerCount: &performerCountCriterion, } - images := queryImages(t, sqb, &imageFilter, nil) + images := queryImages(ctx, t, sqb, &imageFilter, nil) assert.Greater(t, len(images), 0) for _, image := range images { - ids, err := 
sqb.GetPerformerIDs(image.ID)
+            ids, err := sqb.GetPerformerIDs(ctx, image.ID)
             if err != nil {
                 return err
             }
@@ -1118,53 +2480,90 @@ func verifyImagesPerformerCount(t *testing.T, performerCountCriterion models.Int
 }
 
 func TestImageQuerySorting(t *testing.T) {
-    withTxn(func(r models.Repository) error {
-        sort := titleField
-        direction := models.SortDirectionEnumAsc
-        findFilter := models.FindFilterType{
-            Sort:      &sort,
-            Direction: &direction,
-        }
+    tests := []struct {
+        name     string
+        sortBy   string
+        dir      models.SortDirectionEnum
+        firstIdx int // -1 to ignore
+        lastIdx  int
+    }{
+        {
+            "file mod time",
+            "file_mod_time",
+            models.SortDirectionEnumDesc,
+            -1,
+            -1,
+        },
+        {
+            "file size",
+            "filesize",
+            models.SortDirectionEnumDesc,
+            -1,
+            -1,
+        },
+        {
+            "path",
+            "path",
+            models.SortDirectionEnumDesc,
+            -1,
+            -1,
+        },
+    }
-        sqb := r.Image()
-        images, _, err := queryImagesWithCount(sqb, nil, &findFilter)
-        if err != nil {
-            t.Errorf("Error querying image: %s", err.Error())
-        }
+    qb := db.Image
-        // images should be in same order as indexes
-        firstImage := images[0]
-        lastImage := images[len(images)-1]
+    for _, tt := range tests {
+        runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) {
+            assert := assert.New(t)
+            got, err := qb.Query(ctx, models.ImageQueryOptions{
+                QueryOptions: models.QueryOptions{
+                    FindFilter: &models.FindFilterType{
+                        Sort:      &tt.sortBy,
+                        Direction: &tt.dir,
+                    },
+                },
+            })
-        assert.Equal(t, imageIDs[0], firstImage.ID)
-        assert.Equal(t, imageIDs[len(imageIDs)-1], lastImage.ID)
+            if err != nil {
+                t.Errorf("ImageStore.TestImageQuerySorting() error = %v", err)
+                return
+            }
-        // sort in descending order
-        direction = models.SortDirectionEnumDesc
+            images, err := got.Resolve(ctx)
+            if err != nil {
+                t.Errorf("ImageStore.TestImageQuerySorting() error = %v", err)
+                return
+            }
-        images, _, err = queryImagesWithCount(sqb, nil, &findFilter)
-        if err != nil {
-            t.Errorf("Error querying image: %s", err.Error())
-        }
-        firstImage = images[0]
-        lastImage = images[len(images)-1]
+            if !assert.Greater(len(images), 0) {
+                return
+            }
-        assert.Equal(t, imageIDs[len(imageIDs)-1], firstImage.ID)
-        assert.Equal(t, imageIDs[0], lastImage.ID)
+            // images should be in the same order as the indexes
+            first := images[0]
+            last := images[len(images)-1]
-        return nil
-    })
+            if tt.firstIdx != -1 {
+                firstID := imageIDs[tt.firstIdx]
+                assert.Equal(firstID, first.ID)
+            }
+            if tt.lastIdx != -1 {
+                lastID := imageIDs[tt.lastIdx]
+                assert.Equal(lastID, last.ID)
+            }
+        })
+    }
 }
 
 func TestImageQueryPagination(t *testing.T) {
-    withTxn(func(r models.Repository) error {
+    withTxn(func(ctx context.Context) error {
         perPage := 1
         findFilter := models.FindFilterType{
             PerPage: &perPage,
         }
-        sqb := r.Image()
-        images, _, err := queryImagesWithCount(sqb, nil, &findFilter)
+        sqb := db.Image
+        images, _, err := queryImagesWithCount(ctx, sqb, nil, &findFilter)
         if err != nil {
             t.Errorf("Error querying image: %s", err.Error())
         }
@@ -1175,7 +2574,7 @@ func TestImageQueryPagination(t *testing.T) {
         page := 2
         findFilter.Page = &page
 
-        images, _, err = queryImagesWithCount(sqb, nil, &findFilter)
+        images, _, err = queryImagesWithCount(ctx, sqb, nil, &findFilter)
         if err != nil {
             t.Errorf("Error querying image: %s", err.Error())
         }
@@ -1187,7 +2586,7 @@
         perPage = 2
         page = 1
 
-        images, _, err = queryImagesWithCount(sqb, nil, &findFilter)
+        images, _, err = queryImagesWithCount(ctx, sqb, nil, &findFilter)
        if err != nil {
             t.Errorf("Error querying image: %s", err.Error())
         }
@@ -1199,12
+2598,6 @@ func TestImageQueryPagination(t *testing.T) { }) } -// TODO Update -// TODO IncrementOCounter -// TODO DecrementOCounter -// TODO ResetOCounter -// TODO Destroy -// TODO FindByChecksum // TODO Count // TODO SizeCount // TODO All diff --git a/pkg/database/migrations/10_image_tables.up.sql b/pkg/sqlite/migrations/10_image_tables.up.sql similarity index 100% rename from pkg/database/migrations/10_image_tables.up.sql rename to pkg/sqlite/migrations/10_image_tables.up.sql diff --git a/pkg/database/migrations/11_tag_image.up.sql b/pkg/sqlite/migrations/11_tag_image.up.sql similarity index 100% rename from pkg/database/migrations/11_tag_image.up.sql rename to pkg/sqlite/migrations/11_tag_image.up.sql diff --git a/pkg/database/migrations/12_oshash.up.sql b/pkg/sqlite/migrations/12_oshash.up.sql similarity index 100% rename from pkg/database/migrations/12_oshash.up.sql rename to pkg/sqlite/migrations/12_oshash.up.sql diff --git a/pkg/database/migrations/13_images.up.sql b/pkg/sqlite/migrations/13_images.up.sql similarity index 100% rename from pkg/database/migrations/13_images.up.sql rename to pkg/sqlite/migrations/13_images.up.sql diff --git a/pkg/database/migrations/14_stash_box_ids.up.sql b/pkg/sqlite/migrations/14_stash_box_ids.up.sql similarity index 100% rename from pkg/database/migrations/14_stash_box_ids.up.sql rename to pkg/sqlite/migrations/14_stash_box_ids.up.sql diff --git a/pkg/database/migrations/15_file_mod_time.up.sql b/pkg/sqlite/migrations/15_file_mod_time.up.sql similarity index 100% rename from pkg/database/migrations/15_file_mod_time.up.sql rename to pkg/sqlite/migrations/15_file_mod_time.up.sql diff --git a/pkg/database/migrations/16_organized_flag.up.sql b/pkg/sqlite/migrations/16_organized_flag.up.sql similarity index 100% rename from pkg/database/migrations/16_organized_flag.up.sql rename to pkg/sqlite/migrations/16_organized_flag.up.sql diff --git a/pkg/database/migrations/17_reset_scene_size.up.sql b/pkg/sqlite/migrations/17_reset_scene_size.up.sql similarity index 100% rename from pkg/database/migrations/17_reset_scene_size.up.sql rename to pkg/sqlite/migrations/17_reset_scene_size.up.sql diff --git a/pkg/database/migrations/18_scene_galleries.up.sql b/pkg/sqlite/migrations/18_scene_galleries.up.sql similarity index 100% rename from pkg/database/migrations/18_scene_galleries.up.sql rename to pkg/sqlite/migrations/18_scene_galleries.up.sql diff --git a/pkg/database/migrations/19_performer_tags.up.sql b/pkg/sqlite/migrations/19_performer_tags.up.sql similarity index 100% rename from pkg/database/migrations/19_performer_tags.up.sql rename to pkg/sqlite/migrations/19_performer_tags.up.sql diff --git a/pkg/database/migrations/1_initial.down.sql b/pkg/sqlite/migrations/1_initial.down.sql similarity index 100% rename from pkg/database/migrations/1_initial.down.sql rename to pkg/sqlite/migrations/1_initial.down.sql diff --git a/pkg/database/migrations/1_initial.up.sql b/pkg/sqlite/migrations/1_initial.up.sql similarity index 100% rename from pkg/database/migrations/1_initial.up.sql rename to pkg/sqlite/migrations/1_initial.up.sql diff --git a/pkg/database/migrations/20_phash.up.sql b/pkg/sqlite/migrations/20_phash.up.sql similarity index 100% rename from pkg/database/migrations/20_phash.up.sql rename to pkg/sqlite/migrations/20_phash.up.sql diff --git a/pkg/database/migrations/21_performers_studios_details.up.sql b/pkg/sqlite/migrations/21_performers_studios_details.up.sql similarity index 100% rename from pkg/database/migrations/21_performers_studios_details.up.sql 
rename to pkg/sqlite/migrations/21_performers_studios_details.up.sql diff --git a/pkg/database/migrations/22_performers_studios_rating.up.sql b/pkg/sqlite/migrations/22_performers_studios_rating.up.sql similarity index 100% rename from pkg/database/migrations/22_performers_studios_rating.up.sql rename to pkg/sqlite/migrations/22_performers_studios_rating.up.sql diff --git a/pkg/database/migrations/23_scenes_interactive.up.sql b/pkg/sqlite/migrations/23_scenes_interactive.up.sql similarity index 100% rename from pkg/database/migrations/23_scenes_interactive.up.sql rename to pkg/sqlite/migrations/23_scenes_interactive.up.sql diff --git a/pkg/database/migrations/24_tag_aliases.up.sql b/pkg/sqlite/migrations/24_tag_aliases.up.sql similarity index 100% rename from pkg/database/migrations/24_tag_aliases.up.sql rename to pkg/sqlite/migrations/24_tag_aliases.up.sql diff --git a/pkg/database/migrations/25_saved_filters.up.sql b/pkg/sqlite/migrations/25_saved_filters.up.sql similarity index 100% rename from pkg/database/migrations/25_saved_filters.up.sql rename to pkg/sqlite/migrations/25_saved_filters.up.sql diff --git a/pkg/database/migrations/26_tag_hierarchy.up.sql b/pkg/sqlite/migrations/26_tag_hierarchy.up.sql similarity index 100% rename from pkg/database/migrations/26_tag_hierarchy.up.sql rename to pkg/sqlite/migrations/26_tag_hierarchy.up.sql diff --git a/pkg/database/migrations/27_studio_aliases.up.sql b/pkg/sqlite/migrations/27_studio_aliases.up.sql similarity index 100% rename from pkg/database/migrations/27_studio_aliases.up.sql rename to pkg/sqlite/migrations/27_studio_aliases.up.sql diff --git a/pkg/database/migrations/28_images_indexes.up.sql b/pkg/sqlite/migrations/28_images_indexes.up.sql similarity index 100% rename from pkg/database/migrations/28_images_indexes.up.sql rename to pkg/sqlite/migrations/28_images_indexes.up.sql diff --git a/pkg/database/migrations/29_interactive_speed.up.sql b/pkg/sqlite/migrations/29_interactive_speed.up.sql similarity index 100% rename from pkg/database/migrations/29_interactive_speed.up.sql rename to pkg/sqlite/migrations/29_interactive_speed.up.sql diff --git a/pkg/database/migrations/2_cover_image.up.sql b/pkg/sqlite/migrations/2_cover_image.up.sql similarity index 100% rename from pkg/database/migrations/2_cover_image.up.sql rename to pkg/sqlite/migrations/2_cover_image.up.sql diff --git a/pkg/database/migrations/30_ignore_autotag.up..sql b/pkg/sqlite/migrations/30_ignore_autotag.up..sql similarity index 100% rename from pkg/database/migrations/30_ignore_autotag.up..sql rename to pkg/sqlite/migrations/30_ignore_autotag.up..sql diff --git a/pkg/database/migrations/31_scenes_captions.up.sql b/pkg/sqlite/migrations/31_scenes_captions.up.sql similarity index 100% rename from pkg/database/migrations/31_scenes_captions.up.sql rename to pkg/sqlite/migrations/31_scenes_captions.up.sql diff --git a/pkg/sqlite/migrations/32_files.up.sql b/pkg/sqlite/migrations/32_files.up.sql new file mode 100644 index 000000000..8e76b0d37 --- /dev/null +++ b/pkg/sqlite/migrations/32_files.up.sql @@ -0,0 +1,547 @@ +-- folders may be deleted independently. 
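-- (removing a parent folder should detach its children rather than
-- delete them, hence ON DELETE SET NULL on the self-reference below;
-- for example, an illustrative
--   DELETE FROM folders WHERE id = 42;
-- leaves the children of folder 42 in place with parent_folder_id
-- reset to NULL.)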
Don't cascade +CREATE TABLE `folders` ( + `id` integer not null primary key autoincrement, + `path` varchar(255) NOT NULL, + `parent_folder_id` integer, + `mod_time` datetime not null, + `created_at` datetime not null, + `updated_at` datetime not null, + foreign key(`parent_folder_id`) references `folders`(`id`) on delete SET NULL +); + +CREATE INDEX `index_folders_on_parent_folder_id` on `folders` (`parent_folder_id`); +CREATE UNIQUE INDEX `index_folders_on_path_unique` on `folders` (`path`); + +-- require reference folders/zip files to be deleted manually first +CREATE TABLE `files` ( + `id` integer not null primary key autoincrement, + `basename` varchar(255) NOT NULL, + `zip_file_id` integer, + `parent_folder_id` integer not null, + `size` integer NOT NULL, + `mod_time` datetime not null, + `created_at` datetime not null, + `updated_at` datetime not null, + foreign key(`parent_folder_id`) references `folders`(`id`), + foreign key(`zip_file_id`) references `files`(`id`), + CHECK (`basename` != '') +); + +CREATE UNIQUE INDEX `index_files_zip_basename_unique` ON `files` (`zip_file_id`, `parent_folder_id`, `basename`) WHERE `zip_file_id` IS NOT NULL; +CREATE UNIQUE INDEX `index_files_on_parent_folder_id_basename_unique` on `files` (`parent_folder_id`, `basename`); +CREATE INDEX `index_files_on_basename` on `files` (`basename`); + +ALTER TABLE `folders` ADD COLUMN `zip_file_id` integer REFERENCES `files`(`id`); +CREATE INDEX `index_folders_on_zip_file_id` on `folders` (`zip_file_id`) WHERE `zip_file_id` IS NOT NULL; + +CREATE TABLE `files_fingerprints` ( + `file_id` integer NOT NULL, + `type` varchar(255) NOT NULL, + `fingerprint` blob NOT NULL, + foreign key(`file_id`) references `files`(`id`) on delete CASCADE, + PRIMARY KEY (`file_id`, `type`, `fingerprint`) +); + +CREATE INDEX `index_fingerprint_type_fingerprint` ON `files_fingerprints` (`type`, `fingerprint`); + +CREATE TABLE `video_files` ( + `file_id` integer NOT NULL primary key, + `duration` float NOT NULL, + `video_codec` varchar(255) NOT NULL, + `format` varchar(255) NOT NULL, + `audio_codec` varchar(255) NOT NULL, + `width` tinyint NOT NULL, + `height` tinyint NOT NULL, + `frame_rate` float NOT NULL, + `bit_rate` integer NOT NULL, + `interactive` boolean not null default '0', + `interactive_speed` int, + foreign key(`file_id`) references `files`(`id`) on delete CASCADE +); + +CREATE TABLE `video_captions` ( + `file_id` integer NOT NULL, + `language_code` varchar(255) NOT NULL, + `filename` varchar(255) NOT NULL, + `caption_type` varchar(255) NOT NULL, + primary key (`file_id`, `language_code`, `caption_type`), + foreign key(`file_id`) references `video_files`(`file_id`) on delete CASCADE +); + +CREATE TABLE `image_files` ( + `file_id` integer NOT NULL primary key, + `format` varchar(255) NOT NULL, + `width` tinyint NOT NULL, + `height` tinyint NOT NULL, + foreign key(`file_id`) references `files`(`id`) on delete CASCADE +); + +CREATE TABLE `images_files` ( + `image_id` integer NOT NULL, + `file_id` integer NOT NULL, + `primary` boolean NOT NULL, + foreign key(`image_id`) references `images`(`id`) on delete CASCADE, + foreign key(`file_id`) references `files`(`id`) on delete CASCADE, + PRIMARY KEY(`image_id`, `file_id`) +); + +CREATE INDEX `index_images_files_on_file_id` on `images_files` (`file_id`); +CREATE UNIQUE INDEX `unique_index_images_files_on_primary` on `images_files` (`image_id`) WHERE `primary` = 1; + +CREATE TABLE `galleries_files` ( + `gallery_id` integer NOT NULL, + `file_id` integer NOT NULL, + `primary` boolean 
NOT NULL, + foreign key(`gallery_id`) references `galleries`(`id`) on delete CASCADE, + foreign key(`file_id`) references `files`(`id`) on delete CASCADE, + PRIMARY KEY(`gallery_id`, `file_id`) +); + +CREATE INDEX `index_galleries_files_file_id` ON `galleries_files` (`file_id`); +CREATE UNIQUE INDEX `unique_index_galleries_files_on_primary` on `galleries_files` (`gallery_id`) WHERE `primary` = 1; + +CREATE TABLE `scenes_files` ( + `scene_id` integer NOT NULL, + `file_id` integer NOT NULL, + `primary` boolean NOT NULL, + foreign key(`scene_id`) references `scenes`(`id`) on delete CASCADE, + foreign key(`file_id`) references `files`(`id`) on delete CASCADE, + PRIMARY KEY(`scene_id`, `file_id`) +); + +CREATE INDEX `index_scenes_files_file_id` ON `scenes_files` (`file_id`); +CREATE UNIQUE INDEX `unique_index_scenes_files_on_primary` on `scenes_files` (`scene_id`) WHERE `primary` = 1; + +PRAGMA foreign_keys=OFF; + +CREATE TABLE `images_new` ( + `id` integer not null primary key autoincrement, + -- REMOVED: `path` varchar(510) not null, + -- REMOVED: `checksum` varchar(255) not null, + `title` varchar(255), + `rating` tinyint, + -- REMOVED: `size` integer, + -- REMOVED: `width` tinyint, + -- REMOVED: `height` tinyint, + `studio_id` integer, + `o_counter` tinyint not null default 0, + `organized` boolean not null default '0', + -- REMOVED: `file_mod_time` datetime, + `created_at` datetime not null, + `updated_at` datetime not null, + foreign key(`studio_id`) references `studios`(`id`) on delete SET NULL +); + +INSERT INTO `images_new` + ( + `id`, + `title`, + `rating`, + `studio_id`, + `o_counter`, + `organized`, + `created_at`, + `updated_at` + ) + SELECT + `id`, + `title`, + `rating`, + `studio_id`, + `o_counter`, + `organized`, + `created_at`, + `updated_at` + FROM `images`; + +-- create temporary placeholder folder +INSERT INTO `folders` (`path`, `mod_time`, `created_at`, `updated_at`) VALUES ('', '1970-01-01 00:00:00', '1970-01-01 00:00:00', '1970-01-01 00:00:00'); + +-- insert image files - we will fix these up in the post-migration +INSERT INTO `files` + ( + `basename`, + `parent_folder_id`, + `size`, + `mod_time`, + `created_at`, + `updated_at` + ) + SELECT + `path`, + 1, + -- special value if null so that it is recalculated + COALESCE(`size`, -1), + COALESCE(`file_mod_time`, '1970-01-01 00:00:00'), + `created_at`, + `updated_at` + FROM `images`; + +INSERT INTO `image_files` + ( + `file_id`, + `format`, + `width`, + `height` + ) + SELECT + `files`.`id`, + -- special values so that they are recalculated + 'unset', + COALESCE(`images`.`width`, -1), + COALESCE(`images`.`height`, -1) + FROM `images` INNER JOIN `files` ON `images`.`path` = `files`.`basename` AND `files`.`parent_folder_id` = 1; + +INSERT INTO `images_files` + ( + `image_id`, + `file_id`, + `primary` + ) + SELECT + `images`.`id`, + `files`.`id`, + 1 + FROM `images` INNER JOIN `files` ON `images`.`path` = `files`.`basename` AND `files`.`parent_folder_id` = 1; + +INSERT INTO `files_fingerprints` + ( + `file_id`, + `type`, + `fingerprint` + ) + SELECT + `files`.`id`, + 'md5', + `images`.`checksum` + FROM `images` INNER JOIN `files` ON `images`.`path` = `files`.`basename` AND `files`.`parent_folder_id` = 1; + +DROP TABLE `images`; +ALTER TABLE `images_new` rename to `images`; + +CREATE INDEX `index_images_on_studio_id` on `images` (`studio_id`); + + +CREATE TABLE `galleries_new` ( + `id` integer not null primary key autoincrement, + -- REMOVED: `path` varchar(510), + -- REMOVED: `checksum` varchar(255) not null, + -- REMOVED: `zip` 
boolean not null default '0', + `folder_id` integer, + `title` varchar(255), + `url` varchar(255), + `date` date, + `details` text, + `studio_id` integer, + `rating` tinyint, + -- REMOVED: `file_mod_time` datetime, + `organized` boolean not null default '0', + `created_at` datetime not null, + `updated_at` datetime not null, + foreign key(`studio_id`) references `studios`(`id`) on delete SET NULL, + foreign key(`folder_id`) references `folders`(`id`) on delete SET NULL +); + +INSERT INTO `galleries_new` + ( + `id`, + `title`, + `url`, + `date`, + `details`, + `studio_id`, + `rating`, + `organized`, + `created_at`, + `updated_at` + ) + SELECT + `id`, + `title`, + `url`, + `date`, + `details`, + `studio_id`, + `rating`, + `organized`, + `created_at`, + `updated_at` + FROM `galleries`; + +-- insert gallery files - we will fix these up in the post-migration +INSERT INTO `files` + ( + `basename`, + `parent_folder_id`, + `size`, + `mod_time`, + `created_at`, + `updated_at` + ) + SELECT + `path`, + 1, + -- special value so that it is recalculated + -1, + COALESCE(`file_mod_time`, '1970-01-01 00:00:00'), + `created_at`, + `updated_at` + FROM `galleries` + WHERE `galleries`.`path` IS NOT NULL AND `galleries`.`zip` = '1'; + +-- insert gallery zip folders - we will fix these up in the post-migration +INSERT INTO `folders` + ( + `path`, + `zip_file_id`, + `mod_time`, + `created_at`, + `updated_at` + ) + SELECT + `galleries`.`path`, + `files`.`id`, + '1970-01-01 00:00:00', + `galleries`.`created_at`, + `galleries`.`updated_at` + FROM `galleries` + INNER JOIN `files` ON `galleries`.`path` = `files`.`basename` AND `files`.`parent_folder_id` = 1 + WHERE `galleries`.`path` IS NOT NULL AND `galleries`.`zip` = '1'; + +-- set the zip file id of the zip folders +UPDATE `folders` SET `zip_file_id` = (SELECT `files`.`id` FROM `files` WHERE `folders`.`path` = `files`.`basename`); + +-- insert gallery folders - we will fix these up in the post-migration +INSERT INTO `folders` + ( + `path`, + `mod_time`, + `created_at`, + `updated_at` + ) + SELECT + `path`, + '1970-01-01 00:00:00', + `created_at`, + `updated_at` + FROM `galleries` + WHERE `galleries`.`path` IS NOT NULL AND `galleries`.`zip` = '0'; + +UPDATE `galleries_new` SET `folder_id` = ( + SELECT `folders`.`id` FROM `folders` INNER JOIN `galleries` ON `galleries_new`.`id` = `galleries`.`id` WHERE `folders`.`path` = `galleries`.`path` AND `galleries`.`zip` = '0' +); + +INSERT INTO `galleries_files` + ( + `gallery_id`, + `file_id`, + `primary` + ) + SELECT + `galleries`.`id`, + `files`.`id`, + 1 + FROM `galleries` INNER JOIN `files` ON `galleries`.`path` = `files`.`basename` AND `files`.`parent_folder_id` = 1; + +INSERT INTO `files_fingerprints` + ( + `file_id`, + `type`, + `fingerprint` + ) + SELECT + `files`.`id`, + 'md5', + `galleries`.`checksum` + FROM `galleries` INNER JOIN `files` ON `galleries`.`path` = `files`.`basename` AND `files`.`parent_folder_id` = 1; + +DROP TABLE `galleries`; +ALTER TABLE `galleries_new` rename to `galleries`; + +CREATE INDEX `index_galleries_on_studio_id` on `galleries` (`studio_id`); +-- should only be possible to create a single gallery per folder +CREATE UNIQUE INDEX `index_galleries_on_folder_id_unique` on `galleries` (`folder_id`); + +CREATE TABLE `scenes_new` ( + `id` integer not null primary key autoincrement, + -- REMOVED: `path` varchar(510) not null, + -- REMOVED: `checksum` varchar(255), + -- REMOVED: `oshash` varchar(255), + `title` varchar(255), + `details` text, + `url` varchar(255), + `date` date, + `rating` 
tinyint, + -- REMOVED: `size` varchar(255), + -- REMOVED: `duration` float, + -- REMOVED: `video_codec` varchar(255), + -- REMOVED: `audio_codec` varchar(255), + -- REMOVED: `width` tinyint, + -- REMOVED: `height` tinyint, + -- REMOVED: `framerate` float, + -- REMOVED: `bitrate` integer, + `studio_id` integer, + `o_counter` tinyint not null default 0, + -- REMOVED: `format` varchar(255), + `organized` boolean not null default '0', + -- REMOVED: `interactive` boolean not null default '0', + -- REMOVED: `interactive_speed` int, + `created_at` datetime not null, + `updated_at` datetime not null, + -- REMOVED: `file_mod_time` datetime, + -- REMOVED: `phash` blob, + foreign key(`studio_id`) references `studios`(`id`) on delete SET NULL + -- REMOVED: CHECK (`checksum` is not null or `oshash` is not null) +); + +INSERT INTO `scenes_new` + ( + `id`, + `title`, + `details`, + `url`, + `date`, + `rating`, + `studio_id`, + `o_counter`, + `organized`, + `created_at`, + `updated_at` + ) + SELECT + `id`, + `title`, + `details`, + `url`, + `date`, + `rating`, + `studio_id`, + `o_counter`, + `organized`, + `created_at`, + `updated_at` + FROM `scenes`; + +-- insert scene files - we will fix these up in the post-migration +INSERT INTO `files` + ( + `basename`, + `parent_folder_id`, + `size`, + `mod_time`, + `created_at`, + `updated_at` + ) + SELECT + `path`, + 1, + -- special value if null so that it is recalculated + COALESCE(`size`, -1), + COALESCE(`file_mod_time`, '1970-01-01 00:00:00'), + `created_at`, + `updated_at` + FROM `scenes`; + +INSERT INTO `video_files` + ( + `file_id`, + `duration`, + `video_codec`, + `format`, + `audio_codec`, + `width`, + `height`, + `frame_rate`, + `bit_rate`, + `interactive`, + `interactive_speed` + ) + SELECT + `files`.`id`, + `scenes`.`duration`, + -- special values for unset to be updated during scan + COALESCE(`scenes`.`video_codec`, 'unset'), + COALESCE(`scenes`.`format`, 'unset'), + COALESCE(`scenes`.`audio_codec`, 'unset'), + COALESCE(`scenes`.`width`, -1), + COALESCE(`scenes`.`height`, -1), + COALESCE(`scenes`.`framerate`, -1), + COALESCE(`scenes`.`bitrate`, -1), + `scenes`.`interactive`, + `scenes`.`interactive_speed` + FROM `scenes` INNER JOIN `files` ON `scenes`.`path` = `files`.`basename` AND `files`.`parent_folder_id` = 1; + +INSERT INTO `scenes_files` + ( + `scene_id`, + `file_id`, + `primary` + ) + SELECT + `scenes`.`id`, + `files`.`id`, + 1 + FROM `scenes` INNER JOIN `files` ON `scenes`.`path` = `files`.`basename` AND `files`.`parent_folder_id` = 1; + +INSERT INTO `files_fingerprints` + ( + `file_id`, + `type`, + `fingerprint` + ) + SELECT + `files`.`id`, + 'md5', + `scenes`.`checksum` + FROM `scenes` INNER JOIN `files` ON `scenes`.`path` = `files`.`basename` AND `files`.`parent_folder_id` = 1 + WHERE `scenes`.`checksum` is not null; + +INSERT INTO `files_fingerprints` + ( + `file_id`, + `type`, + `fingerprint` + ) + SELECT + `files`.`id`, + 'oshash', + `scenes`.`oshash` + FROM `scenes` INNER JOIN `files` ON `scenes`.`path` = `files`.`basename` AND `files`.`parent_folder_id` = 1 + WHERE `scenes`.`oshash` is not null; + +INSERT INTO `files_fingerprints` + ( + `file_id`, + `type`, + `fingerprint` + ) + SELECT + `files`.`id`, + 'phash', + `scenes`.`phash` + FROM `scenes` INNER JOIN `files` ON `scenes`.`path` = `files`.`basename` AND `files`.`parent_folder_id` = 1 + WHERE `scenes`.`phash` is not null; + +INSERT INTO `video_captions` + ( + `file_id`, + `language_code`, + `filename`, + `caption_type` + ) + SELECT + `files`.`id`, + 
`scene_captions`.`language_code`, + `scene_captions`.`filename`, + `scene_captions`.`caption_type` + FROM `scene_captions` + INNER JOIN `scenes` ON `scene_captions`.`scene_id` = `scenes`.`id` + INNER JOIN `files` ON `scenes`.`path` = `files`.`basename` AND `files`.`parent_folder_id` = 1; + +DROP TABLE `scenes`; +DROP TABLE `scene_captions`; + +ALTER TABLE `scenes_new` rename to `scenes`; +CREATE INDEX `index_scenes_on_studio_id` on `scenes` (`studio_id`); + +PRAGMA foreign_keys=ON; diff --git a/pkg/sqlite/migrations/32_postmigrate.go b/pkg/sqlite/migrations/32_postmigrate.go new file mode 100644 index 000000000..ed80c9765 --- /dev/null +++ b/pkg/sqlite/migrations/32_postmigrate.go @@ -0,0 +1,332 @@ +package migrations + +import ( + "context" + "database/sql" + "fmt" + "path/filepath" + "strings" + "time" + + "github.com/jmoiron/sqlx" + "github.com/stashapp/stash/pkg/logger" + "github.com/stashapp/stash/pkg/sqlite" + "gopkg.in/guregu/null.v4" +) + +const legacyZipSeparator = "\x00" + +func post32(ctx context.Context, db *sqlx.DB) error { + logger.Info("Running post-migration for schema version 32") + + m := schema32Migrator{ + migrator: migrator{ + db: db, + }, + folderCache: make(map[string]folderInfo), + } + + if err := m.migrateFolders(ctx); err != nil { + return fmt.Errorf("migrating folders: %w", err) + } + + if err := m.migrateFiles(ctx); err != nil { + return fmt.Errorf("migrating files: %w", err) + } + + if err := m.deletePlaceholderFolder(ctx); err != nil { + return fmt.Errorf("deleting placeholder folder: %w", err) + } + + return nil +} + +type folderInfo struct { + id int + zipID sql.NullInt64 +} + +type schema32Migrator struct { + migrator + folderCache map[string]folderInfo +} + +func (m *schema32Migrator) migrateFolders(ctx context.Context) error { + logger.Infof("Migrating folders") + + const ( + limit = 1000 + logEvery = 10000 + ) + + lastID := 0 + count := 0 + + for { + gotSome := false + + if err := m.withTxn(ctx, func(tx *sqlx.Tx) error { + query := "SELECT `folders`.`id`, `folders`.`path` FROM `folders` INNER JOIN `galleries` ON `galleries`.`folder_id` = `folders`.`id`" + + if lastID != 0 { + query += fmt.Sprintf("AND `folders`.`id` > %d ", lastID) + } + + query += fmt.Sprintf("ORDER BY `folders`.`id` LIMIT %d", limit) + + rows, err := m.db.Query(query) + if err != nil { + return err + } + defer rows.Close() + + for rows.Next() { + var id int + var p string + + err := rows.Scan(&id, &p) + if err != nil { + return err + } + + lastID = id + gotSome = true + count++ + + parent := filepath.Dir(p) + parentID, zipFileID, err := m.createFolderHierarchy(parent) + if err != nil { + return err + } + + _, err = m.db.Exec("UPDATE `folders` SET `parent_folder_id` = ?, `zip_file_id` = ? WHERE `id` = ?", parentID, zipFileID, id) + if err != nil { + return err + } + } + + return rows.Err() + }); err != nil { + return err + } + + if !gotSome { + break + } + + if count%logEvery == 0 { + logger.Infof("Migrated %d folders", count) + } + } + + return nil +} + +func (m *schema32Migrator) migrateFiles(ctx context.Context) error { + const ( + limit = 1000 + logEvery = 10000 + ) + + result := struct { + Count int `db:"count"` + }{0} + + if err := m.db.Get(&result, "SELECT COUNT(*) AS count FROM `files`"); err != nil { + return err + } + + logger.Infof("Migrating %d files...", result.Count) + + lastID := 0 + count := 0 + + for { + gotSome := false + + // using offset for this is slow. 
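        // (With OFFSET, SQLite re-walks every previously returned row on
        // each batch, so the total cost of paging a large files table grows
        // quadratically. Keyset pagination keeps each batch at O(limit):
        // the loop below ends up issuing queries of the shape
        //
        //	SELECT id, basename FROM files WHERE id > 1000 ORDER BY id LIMIT 1000
        //
        // where 1000 stands in for the last id seen, an illustrative value.)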
Save the last id and filter by that instead + query := "SELECT `id`, `basename` FROM `files` " + if lastID != 0 { + query += fmt.Sprintf("WHERE `id` > %d ", lastID) + } + + query += fmt.Sprintf("ORDER BY `id` LIMIT %d", limit) + + if err := m.withTxn(ctx, func(tx *sqlx.Tx) error { + rows, err := m.db.Query(query) + if err != nil { + return err + } + defer rows.Close() + + for rows.Next() { + gotSome = true + + var id int + var p string + + err := rows.Scan(&id, &p) + if err != nil { + return err + } + + if strings.Contains(p, legacyZipSeparator) { + // remove any null characters from the path + p = strings.ReplaceAll(p, legacyZipSeparator, string(filepath.Separator)) + } + + parent := filepath.Dir(p) + basename := filepath.Base(p) + if parent != "." { + parentID, zipFileID, err := m.createFolderHierarchy(parent) + if err != nil { + return err + } + + _, err = m.db.Exec("UPDATE `files` SET `parent_folder_id` = ?, `zip_file_id` = ?, `basename` = ? WHERE `id` = ?", parentID, zipFileID, basename, id) + if err != nil { + return fmt.Errorf("migrating file %s: %w", p, err) + } + } + + lastID = id + count++ + } + + return rows.Err() + }); err != nil { + return err + } + + if !gotSome { + break + } + + if count%logEvery == 0 { + logger.Infof("Migrated %d files", count) + + // manual checkpoint to flush wal file + if _, err := m.db.Exec("PRAGMA wal_checkpoint(FULL)"); err != nil { + return fmt.Errorf("running wal checkpoint: %w", err) + } + } + } + + logger.Infof("Finished migrating files") + + return nil +} + +func (m *schema32Migrator) deletePlaceholderFolder(ctx context.Context) error { + // only delete the placeholder folder if no files/folders are attached to it + result := struct { + Count int `db:"count"` + }{0} + + if err := m.db.Get(&result, "SELECT COUNT(*) AS count FROM `files` WHERE `parent_folder_id` = 1"); err != nil { + return err + } + + if result.Count > 0 { + return fmt.Errorf("not deleting placeholder folder because it has %d files", result.Count) + } + + result.Count = 0 + + if err := m.db.Get(&result, "SELECT COUNT(*) AS count FROM `folders` WHERE `parent_folder_id` = 1"); err != nil { + return err + } + + if result.Count > 0 { + return fmt.Errorf("not deleting placeholder folder because it has %d folders", result.Count) + } + + _, err := m.db.Exec("DELETE FROM `folders` WHERE `id` = 1") + return err +} + +func (m *schema32Migrator) createFolderHierarchy(p string) (*int, sql.NullInt64, error) { + parent := filepath.Dir(p) + + if parent == p { + // get or create this folder + return m.getOrCreateFolder(p, nil, sql.NullInt64{}) + } + + var ( + parentID *int + zipFileID sql.NullInt64 + err error + ) + + // try to find parent folder in cache first + foundEntry, ok := m.folderCache[parent] + if ok { + parentID = &foundEntry.id + zipFileID = foundEntry.zipID + } else { + parentID, zipFileID, err = m.createFolderHierarchy(parent) + if err != nil { + return nil, sql.NullInt64{}, err + } + } + + return m.getOrCreateFolder(p, parentID, zipFileID) +} + +func (m *schema32Migrator) getOrCreateFolder(path string, parentID *int, zipFileID sql.NullInt64) (*int, sql.NullInt64, error) { + foundEntry, ok := m.folderCache[path] + if ok { + return &foundEntry.id, foundEntry.zipID, nil + } + + const query = "SELECT `id`, `zip_file_id` FROM `folders` WHERE `path` = ?" 
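    // getOrCreateFolder behaves as a memoised get-or-create: the cache hit
    // above returns immediately, the query below looks the path up in the
    // folders table, and only if both miss is a new row inserted and added
    // to the cache, so each distinct path costs at most one SELECT and one
    // INSERT over the whole migration. A usage sketch with illustrative
    // arguments (the path literal is an assumption):
    //
    //	id, zipID, err := m.getOrCreateFolder("/library/2022", parentID, zipFileID)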
+ rows, err := m.db.Query(query, path) + if err != nil { + return nil, sql.NullInt64{}, err + } + defer rows.Close() + + if rows.Next() { + var id int + var zfid sql.NullInt64 + err := rows.Scan(&id, &zfid) + if err != nil { + return nil, sql.NullInt64{}, err + } + + return &id, zfid, nil + } + + if err := rows.Err(); err != nil { + return nil, sql.NullInt64{}, err + } + + const insertSQL = "INSERT INTO `folders` (`path`,`parent_folder_id`,`zip_file_id`,`mod_time`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?)" + + var parentFolderID null.Int + if parentID != nil { + parentFolderID = null.IntFrom(int64(*parentID)) + } + + now := time.Now() + result, err := m.db.Exec(insertSQL, path, parentFolderID, zipFileID, time.Time{}, now, now) + if err != nil { + return nil, sql.NullInt64{}, fmt.Errorf("creating folder %s: %w", path, err) + } + + id, err := result.LastInsertId() + if err != nil { + return nil, sql.NullInt64{}, fmt.Errorf("creating folder %s: %w", path, err) + } + + idInt := int(id) + + m.folderCache[path] = folderInfo{id: idInt, zipID: zipFileID} + + return &idInt, zipFileID, nil +} + +func init() { + sqlite.RegisterPostMigration(32, post32) +} diff --git a/pkg/sqlite/migrations/32_premigrate.go b/pkg/sqlite/migrations/32_premigrate.go new file mode 100644 index 000000000..12906f7d5 --- /dev/null +++ b/pkg/sqlite/migrations/32_premigrate.go @@ -0,0 +1,128 @@ +package migrations + +import ( + "context" + "fmt" + "os" + + "github.com/jmoiron/sqlx" + "github.com/stashapp/stash/pkg/logger" + "github.com/stashapp/stash/pkg/sqlite" +) + +func pre32(ctx context.Context, db *sqlx.DB) error { + // verify that folder-based galleries (those with zip = 0 and path is not null) are + // not zip-based. If they are zip based then set zip to 1 + // we could still miss some if the path does not exist, but this is the best we can do + + logger.Info("Running pre-migration for schema version 32") + + mm := schema32PreMigrator{ + migrator: migrator{ + db: db, + }, + } + + return mm.migrate(ctx) +} + +type schema32PreMigrator struct { + migrator +} + +func (m *schema32PreMigrator) migrate(ctx context.Context) error { + const ( + limit = 1000 + logEvery = 10000 + ) + + // query for galleries with zip = 0 and path not null + result := struct { + Count int `db:"count"` + }{0} + + if err := m.db.Get(&result, "SELECT COUNT(*) AS count FROM `galleries` WHERE `zip` = '0' AND `path` IS NOT NULL"); err != nil { + return err + } + + if result.Count == 0 { + return nil + } + + logger.Infof("Checking %d galleries for incorrect zip value...", result.Count) + + lastID := 0 + count := 0 + + for { + gotSome := false + + if err := m.withTxn(ctx, func(tx *sqlx.Tx) error { + query := "SELECT `id`, `path` FROM `galleries` WHERE `zip` = '0' AND `path` IS NOT NULL " + if lastID != 0 { + query += fmt.Sprintf("AND `id` > %d ", lastID) + } + + query += fmt.Sprintf("ORDER BY `id` LIMIT %d", limit) + + rows, err := m.db.Query(query) + if err != nil { + return err + } + defer rows.Close() + + for rows.Next() { + var id int + var p string + + err := rows.Scan(&id, &p) + if err != nil { + return err + } + + gotSome = true + lastID = id + count++ + + // if path does not exist, make no changes + // if it does exist and is a folder, then we ignore it + // otherwise set zip to 1 + info, err := os.Stat(p) + if err != nil { + logger.Warnf("unable to verify if %q is a folder due to error %v. 
Assuming folder-based.", p, err) + continue + } + + if info.IsDir() { + // ignore it + continue + } + + logger.Infof("Correcting %q gallery to be zip-based.", p) + + _, err = m.db.Exec("UPDATE `galleries` SET `zip` = '1' WHERE `id` = ?", id) + if err != nil { + return err + } + } + + return rows.Err() + }); err != nil { + return err + } + + if !gotSome { + break + } + + if count%logEvery == 0 { + logger.Infof("Checked %d galleries", count) + } + } + + return nil +} + +func init() { + sqlite.RegisterPreMigration(32, pre32) +} diff --git a/pkg/database/migrations/3_o_counter.up.sql b/pkg/sqlite/migrations/3_o_counter.up.sql similarity index 100% rename from pkg/database/migrations/3_o_counter.up.sql rename to pkg/sqlite/migrations/3_o_counter.up.sql diff --git a/pkg/database/migrations/4_movie.up.sql b/pkg/sqlite/migrations/4_movie.up.sql similarity index 100% rename from pkg/database/migrations/4_movie.up.sql rename to pkg/sqlite/migrations/4_movie.up.sql diff --git a/pkg/database/migrations/5_performer_gender.down.sql b/pkg/sqlite/migrations/5_performer_gender.down.sql similarity index 100% rename from pkg/database/migrations/5_performer_gender.down.sql rename to pkg/sqlite/migrations/5_performer_gender.down.sql diff --git a/pkg/database/migrations/5_performer_gender.up.sql b/pkg/sqlite/migrations/5_performer_gender.up.sql similarity index 100% rename from pkg/database/migrations/5_performer_gender.up.sql rename to pkg/sqlite/migrations/5_performer_gender.up.sql diff --git a/pkg/database/migrations/6_scenes_format.up.sql b/pkg/sqlite/migrations/6_scenes_format.up.sql similarity index 100% rename from pkg/database/migrations/6_scenes_format.up.sql rename to pkg/sqlite/migrations/6_scenes_format.up.sql diff --git a/pkg/database/migrations/7_performer_optimization.up.sql b/pkg/sqlite/migrations/7_performer_optimization.up.sql similarity index 100% rename from pkg/database/migrations/7_performer_optimization.up.sql rename to pkg/sqlite/migrations/7_performer_optimization.up.sql diff --git a/pkg/database/migrations/8_movie_fix.up.sql b/pkg/sqlite/migrations/8_movie_fix.up.sql similarity index 100% rename from pkg/database/migrations/8_movie_fix.up.sql rename to pkg/sqlite/migrations/8_movie_fix.up.sql diff --git a/pkg/database/migrations/9_studios_parent_studio.up.sql b/pkg/sqlite/migrations/9_studios_parent_studio.up.sql similarity index 100% rename from pkg/database/migrations/9_studios_parent_studio.up.sql rename to pkg/sqlite/migrations/9_studios_parent_studio.up.sql diff --git a/pkg/sqlite/migrations/custom_migration.go b/pkg/sqlite/migrations/custom_migration.go new file mode 100644 index 000000000..baebc7094 --- /dev/null +++ b/pkg/sqlite/migrations/custom_migration.go @@ -0,0 +1,38 @@ +package migrations + +import ( + "context" + "fmt" + + "github.com/jmoiron/sqlx" +) + +type migrator struct { + db *sqlx.DB +} + +func (m *migrator) withTxn(ctx context.Context, fn func(tx *sqlx.Tx) error) error { + tx, err := m.db.BeginTxx(ctx, nil) + if err != nil { + return fmt.Errorf("beginning transaction: %w", err) + } + + defer func() { + if p := recover(); p != nil { + // a panic occurred, rollback and repanic + _ = tx.Rollback() + panic(p) + } + + if err != nil { + // something went wrong, rollback + _ = tx.Rollback() + } else { + // all good, commit + err = tx.Commit() + } + }() + + err = fn(tx) + return err +} diff --git a/pkg/sqlite/movies.go b/pkg/sqlite/movies.go index eac02ae54..0ecc6f5e5 100644 --- a/pkg/sqlite/movies.go +++ b/pkg/sqlite/movies.go @@ -1,11 +1,15 @@ package sqlite import ( 
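The withTxn helper in custom_migration.go above is the standard begin/defer/commit shape: the deferred closure rolls back on panic or when the err variable is non-nil, and commits otherwise, with fn's result assigned to that same err variable before the defer runs. A hypothetical caller would look like this (a sketch only; the items table and renameAll are invented for illustration):

// each call gets its own transaction: an error returned from the closure
// triggers the deferred Rollback inside withTxn, nil triggers Commit.
// sqlx.Tx embeds *sql.Tx, so ExecContext is available directly.
func (m *migrator) renameAll(ctx context.Context) error {
	return m.withTxn(ctx, func(tx *sqlx.Tx) error {
		_, err := tx.ExecContext(ctx, "UPDATE `items` SET `name` = upper(`name`)")
		return err
	})
}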
+ "context" "database/sql" "errors" "fmt" + "github.com/doug-martin/goqu/v9" + "github.com/jmoiron/sqlx" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sliceutil/intslice" ) const movieTable = "movies" @@ -15,50 +19,47 @@ type movieQueryBuilder struct { repository } -func NewMovieReaderWriter(tx dbi) *movieQueryBuilder { - return &movieQueryBuilder{ - repository{ - tx: tx, - tableName: movieTable, - idColumn: idColumn, - }, - } +var MovieReaderWriter = &movieQueryBuilder{ + repository{ + tableName: movieTable, + idColumn: idColumn, + }, } -func (qb *movieQueryBuilder) Create(newObject models.Movie) (*models.Movie, error) { +func (qb *movieQueryBuilder) Create(ctx context.Context, newObject models.Movie) (*models.Movie, error) { var ret models.Movie - if err := qb.insertObject(newObject, &ret); err != nil { + if err := qb.insertObject(ctx, newObject, &ret); err != nil { return nil, err } return &ret, nil } -func (qb *movieQueryBuilder) Update(updatedObject models.MoviePartial) (*models.Movie, error) { +func (qb *movieQueryBuilder) Update(ctx context.Context, updatedObject models.MoviePartial) (*models.Movie, error) { const partial = true - if err := qb.update(updatedObject.ID, updatedObject, partial); err != nil { + if err := qb.update(ctx, updatedObject.ID, updatedObject, partial); err != nil { return nil, err } - return qb.Find(updatedObject.ID) + return qb.Find(ctx, updatedObject.ID) } -func (qb *movieQueryBuilder) UpdateFull(updatedObject models.Movie) (*models.Movie, error) { +func (qb *movieQueryBuilder) UpdateFull(ctx context.Context, updatedObject models.Movie) (*models.Movie, error) { const partial = false - if err := qb.update(updatedObject.ID, updatedObject, partial); err != nil { + if err := qb.update(ctx, updatedObject.ID, updatedObject, partial); err != nil { return nil, err } - return qb.Find(updatedObject.ID) + return qb.Find(ctx, updatedObject.ID) } -func (qb *movieQueryBuilder) Destroy(id int) error { - return qb.destroyExisting([]int{id}) +func (qb *movieQueryBuilder) Destroy(ctx context.Context, id int) error { + return qb.destroyExisting(ctx, []int{id}) } -func (qb *movieQueryBuilder) Find(id int) (*models.Movie, error) { +func (qb *movieQueryBuilder) Find(ctx context.Context, id int) (*models.Movie, error) { var ret models.Movie - if err := qb.get(id, &ret); err != nil { + if err := qb.getByID(ctx, id, &ret); err != nil { if errors.Is(err, sql.ErrNoRows) { return nil, nil } @@ -67,35 +68,59 @@ func (qb *movieQueryBuilder) Find(id int) (*models.Movie, error) { return &ret, nil } -func (qb *movieQueryBuilder) FindMany(ids []int) ([]*models.Movie, error) { - var movies []*models.Movie - for _, id := range ids { - movie, err := qb.Find(id) - if err != nil { - return nil, err - } - - if movie == nil { - return nil, fmt.Errorf("movie with id %d not found", id) - } - - movies = append(movies, movie) +func (qb *movieQueryBuilder) FindMany(ctx context.Context, ids []int) ([]*models.Movie, error) { + tableMgr := movieTableMgr + q := goqu.Select("*").From(tableMgr.table).Where(tableMgr.byIDInts(ids...)) + unsorted, err := qb.getMany(ctx, q) + if err != nil { + return nil, err } - return movies, nil + ret := make([]*models.Movie, len(ids)) + + for _, s := range unsorted { + i := intslice.IntIndex(ids, s.ID) + ret[i] = s + } + + for i := range ret { + if ret[i] == nil { + return nil, fmt.Errorf("movie with id %d not found", ids[i]) + } + } + + return ret, nil } -func (qb *movieQueryBuilder) FindByName(name string, nocase bool) (*models.Movie, error) { +func 
(qb *movieQueryBuilder) getMany(ctx context.Context, q *goqu.SelectDataset) ([]*models.Movie, error) { + const single = false + var ret []*models.Movie + if err := queryFunc(ctx, q, single, func(r *sqlx.Rows) error { + var f models.Movie + if err := r.StructScan(&f); err != nil { + return err + } + + ret = append(ret, &f) + return nil + }); err != nil { + return nil, err + } + + return ret, nil +} + +func (qb *movieQueryBuilder) FindByName(ctx context.Context, name string, nocase bool) (*models.Movie, error) { query := "SELECT * FROM movies WHERE name = ?" if nocase { query += " COLLATE NOCASE" } query += " LIMIT 1" args := []interface{}{name} - return qb.queryMovie(query, args) + return qb.queryMovie(ctx, query, args) } -func (qb *movieQueryBuilder) FindByNames(names []string, nocase bool) ([]*models.Movie, error) { +func (qb *movieQueryBuilder) FindByNames(ctx context.Context, names []string, nocase bool) ([]*models.Movie, error) { query := "SELECT * FROM movies WHERE name" if nocase { query += " COLLATE NOCASE" @@ -105,34 +130,34 @@ func (qb *movieQueryBuilder) FindByNames(names []string, nocase bool) ([]*models for _, name := range names { args = append(args, name) } - return qb.queryMovies(query, args) + return qb.queryMovies(ctx, query, args) } -func (qb *movieQueryBuilder) Count() (int, error) { - return qb.runCountQuery(qb.buildCountQuery("SELECT movies.id FROM movies"), nil) +func (qb *movieQueryBuilder) Count(ctx context.Context) (int, error) { + return qb.runCountQuery(ctx, qb.buildCountQuery("SELECT movies.id FROM movies"), nil) } -func (qb *movieQueryBuilder) All() ([]*models.Movie, error) { - return qb.queryMovies(selectAll("movies")+qb.getMovieSort(nil), nil) +func (qb *movieQueryBuilder) All(ctx context.Context) ([]*models.Movie, error) { + return qb.queryMovies(ctx, selectAll("movies")+qb.getMovieSort(nil), nil) } -func (qb *movieQueryBuilder) makeFilter(movieFilter *models.MovieFilterType) *filterBuilder { +func (qb *movieQueryBuilder) makeFilter(ctx context.Context, movieFilter *models.MovieFilterType) *filterBuilder { query := &filterBuilder{} - query.handleCriterion(stringCriterionHandler(movieFilter.Name, "movies.name")) - query.handleCriterion(stringCriterionHandler(movieFilter.Director, "movies.director")) - query.handleCriterion(stringCriterionHandler(movieFilter.Synopsis, "movies.synopsis")) - query.handleCriterion(intCriterionHandler(movieFilter.Rating, "movies.rating")) - query.handleCriterion(durationCriterionHandler(movieFilter.Duration, "movies.duration")) - query.handleCriterion(movieIsMissingCriterionHandler(qb, movieFilter.IsMissing)) - query.handleCriterion(stringCriterionHandler(movieFilter.URL, "movies.url")) - query.handleCriterion(movieStudioCriterionHandler(qb, movieFilter.Studios)) - query.handleCriterion(moviePerformersCriterionHandler(qb, movieFilter.Performers)) + query.handleCriterion(ctx, stringCriterionHandler(movieFilter.Name, "movies.name")) + query.handleCriterion(ctx, stringCriterionHandler(movieFilter.Director, "movies.director")) + query.handleCriterion(ctx, stringCriterionHandler(movieFilter.Synopsis, "movies.synopsis")) + query.handleCriterion(ctx, intCriterionHandler(movieFilter.Rating, "movies.rating", nil)) + query.handleCriterion(ctx, durationCriterionHandler(movieFilter.Duration, "movies.duration", nil)) + query.handleCriterion(ctx, movieIsMissingCriterionHandler(qb, movieFilter.IsMissing)) + query.handleCriterion(ctx, stringCriterionHandler(movieFilter.URL, "movies.url")) + query.handleCriterion(ctx, 
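The new FindMany above replaces N individual Find calls with one WHERE id IN (...) query and then restores the caller's ordering, since SQL gives no guarantee that rows come back in the order of the IN list. The reorder step in isolation (Item is a hypothetical stand-in for models.Movie; intslice.IntIndex is, as used above, a linear index lookup):

package main

import "fmt"

// Item stands in for models.Movie: anything with an integer ID.
type Item struct{ ID int }

// reorderByIDs arranges unsorted to match ids, erroring on any id that was
// not returned, which is the same not-found check FindMany performs.
func reorderByIDs(ids []int, unsorted []*Item) ([]*Item, error) {
	ret := make([]*Item, len(ids))
	for _, it := range unsorted {
		for i, id := range ids {
			if id == it.ID { // linear scan, as intslice.IntIndex does
				ret[i] = it
				break
			}
		}
	}
	for i := range ret {
		if ret[i] == nil {
			return nil, fmt.Errorf("item with id %d not found", ids[i])
		}
	}
	return ret, nil
}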
movieStudioCriterionHandler(qb, movieFilter.Studios)) + query.handleCriterion(ctx, moviePerformersCriterionHandler(qb, movieFilter.Performers)) return query } -func (qb *movieQueryBuilder) Query(movieFilter *models.MovieFilterType, findFilter *models.FindFilterType) ([]*models.Movie, int, error) { +func (qb *movieQueryBuilder) Query(ctx context.Context, movieFilter *models.MovieFilterType, findFilter *models.FindFilterType) ([]*models.Movie, int, error) { if findFilter == nil { findFilter = &models.FindFilterType{} } @@ -148,31 +173,26 @@ func (qb *movieQueryBuilder) Query(movieFilter *models.MovieFilterType, findFilt query.parseQueryString(searchColumns, *q) } - filter := qb.makeFilter(movieFilter) + filter := qb.makeFilter(ctx, movieFilter) query.addFilter(filter) query.sortAndPagination = qb.getMovieSort(findFilter) + getPagination(findFilter) - idsResult, countResult, err := query.executeFind() + idsResult, countResult, err := query.executeFind(ctx) if err != nil { return nil, 0, err } - var movies []*models.Movie - for _, id := range idsResult { - movie, err := qb.Find(id) - if err != nil { - return nil, 0, err - } - - movies = append(movies, movie) + movies, err := qb.FindMany(ctx, idsResult) + if err != nil { + return nil, 0, err } return movies, countResult, nil } func movieIsMissingCriterionHandler(qb *movieQueryBuilder, isMissing *string) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if isMissing != nil && *isMissing != "" { switch *isMissing { case "front_image": @@ -206,7 +226,7 @@ func movieStudioCriterionHandler(qb *movieQueryBuilder, studios *models.Hierarch } func moviePerformersCriterionHandler(qb *movieQueryBuilder, performers *models.MultiCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if performers != nil { if performers.Modifier == models.CriterionModifierIsNull || performers.Modifier == models.CriterionModifierNotNull { var notClause string @@ -273,30 +293,30 @@ func (qb *movieQueryBuilder) getMovieSort(findFilter *models.FindFilterType) str } } -func (qb *movieQueryBuilder) queryMovie(query string, args []interface{}) (*models.Movie, error) { - results, err := qb.queryMovies(query, args) +func (qb *movieQueryBuilder) queryMovie(ctx context.Context, query string, args []interface{}) (*models.Movie, error) { + results, err := qb.queryMovies(ctx, query, args) if err != nil || len(results) < 1 { return nil, err } return results[0], nil } -func (qb *movieQueryBuilder) queryMovies(query string, args []interface{}) ([]*models.Movie, error) { +func (qb *movieQueryBuilder) queryMovies(ctx context.Context, query string, args []interface{}) ([]*models.Movie, error) { var ret models.Movies - if err := qb.query(query, args, &ret); err != nil { + if err := qb.query(ctx, query, args, &ret); err != nil { return nil, err } return []*models.Movie(ret), nil } -func (qb *movieQueryBuilder) UpdateImages(movieID int, frontImage []byte, backImage []byte) error { +func (qb *movieQueryBuilder) UpdateImages(ctx context.Context, movieID int, frontImage []byte, backImage []byte) error { // Delete the existing cover and then create new - if err := qb.DestroyImages(movieID); err != nil { + if err := qb.DestroyImages(ctx, movieID); err != nil { return err } - _, err := qb.tx.Exec( + _, err := qb.tx.Exec(ctx, `INSERT INTO movies_images (movie_id, front_image, back_image) VALUES (?, ?, ?)`, movieID, frontImage, @@ -306,26 +326,26 @@ func (qb 
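The mechanical change running through every query builder in this diff is that criterionHandlerFunc now receives a context, so handlers that themselves hit the database (such as the hierarchy expansion further below) can respect cancellation. Stripped-down stand-ins showing the closure shape; the real filterBuilder carries joins, CTEs and more, and the extra nil argument the diff adds to the int and bool handlers (presumably an optional join hook) is not modeled here:

package sketch

import "context"

type filterBuilder struct {
	whereClauses []string
	args         []interface{}
}

func (f *filterBuilder) addWhere(clause string, args ...interface{}) {
	f.whereClauses = append(f.whereClauses, clause)
	f.args = append(f.args, args...)
}

type criterionHandlerFunc func(ctx context.Context, f *filterBuilder)

// a handler is a closure over one filter input; it contributes WHERE
// fragments only when that input is set.
func boolHandler(value *bool, column string) criterionHandlerFunc {
	return func(ctx context.Context, f *filterBuilder) {
		if value != nil {
			f.addWhere(column+" = ?", *value)
		}
	}
}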
*movieQueryBuilder) UpdateImages(movieID int, frontImage []byte, backIm return err } -func (qb *movieQueryBuilder) DestroyImages(movieID int) error { +func (qb *movieQueryBuilder) DestroyImages(ctx context.Context, movieID int) error { // Delete the existing joins - _, err := qb.tx.Exec("DELETE FROM movies_images WHERE movie_id = ?", movieID) + _, err := qb.tx.Exec(ctx, "DELETE FROM movies_images WHERE movie_id = ?", movieID) if err != nil { return err } return err } -func (qb *movieQueryBuilder) GetFrontImage(movieID int) ([]byte, error) { +func (qb *movieQueryBuilder) GetFrontImage(ctx context.Context, movieID int) ([]byte, error) { query := `SELECT front_image from movies_images WHERE movie_id = ?` - return getImage(qb.tx, query, movieID) + return getImage(ctx, qb.tx, query, movieID) } -func (qb *movieQueryBuilder) GetBackImage(movieID int) ([]byte, error) { +func (qb *movieQueryBuilder) GetBackImage(ctx context.Context, movieID int) ([]byte, error) { query := `SELECT back_image from movies_images WHERE movie_id = ?` - return getImage(qb.tx, query, movieID) + return getImage(ctx, qb.tx, query, movieID) } -func (qb *movieQueryBuilder) FindByPerformerID(performerID int) ([]*models.Movie, error) { +func (qb *movieQueryBuilder) FindByPerformerID(ctx context.Context, performerID int) ([]*models.Movie, error) { query := `SELECT DISTINCT movies.* FROM movies INNER JOIN movies_scenes ON movies.id = movies_scenes.movie_id @@ -333,33 +353,33 @@ INNER JOIN performers_scenes ON performers_scenes.scene_id = movies_scenes.scene WHERE performers_scenes.performer_id = ? ` args := []interface{}{performerID} - return qb.queryMovies(query, args) + return qb.queryMovies(ctx, query, args) } -func (qb *movieQueryBuilder) CountByPerformerID(performerID int) (int, error) { +func (qb *movieQueryBuilder) CountByPerformerID(ctx context.Context, performerID int) (int, error) { query := `SELECT COUNT(DISTINCT movies_scenes.movie_id) AS count FROM movies_scenes INNER JOIN performers_scenes ON performers_scenes.scene_id = movies_scenes.scene_id WHERE performers_scenes.performer_id = ? ` args := []interface{}{performerID} - return qb.runCountQuery(query, args) + return qb.runCountQuery(ctx, query, args) } -func (qb *movieQueryBuilder) FindByStudioID(studioID int) ([]*models.Movie, error) { +func (qb *movieQueryBuilder) FindByStudioID(ctx context.Context, studioID int) ([]*models.Movie, error) { query := `SELECT movies.* FROM movies WHERE movies.studio_id = ? ` args := []interface{}{studioID} - return qb.queryMovies(query, args) + return qb.queryMovies(ctx, query, args) } -func (qb *movieQueryBuilder) CountByStudioID(studioID int) (int, error) { +func (qb *movieQueryBuilder) CountByStudioID(ctx context.Context, studioID int) (int, error) { query := `SELECT COUNT(1) AS count FROM movies WHERE movies.studio_id = ? 
` args := []interface{}{studioID} - return qb.runCountQuery(query, args) + return qb.runCountQuery(ctx, query, args) } diff --git a/pkg/sqlite/movies_test.go b/pkg/sqlite/movies_test.go index 75c6cc5bf..eff0cf50b 100644 --- a/pkg/sqlite/movies_test.go +++ b/pkg/sqlite/movies_test.go @@ -4,6 +4,7 @@ package sqlite_test import ( + "context" "database/sql" "fmt" "strconv" @@ -14,15 +15,16 @@ import ( "github.com/stashapp/stash/pkg/hash/md5" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sqlite" ) func TestMovieFindByName(t *testing.T) { - withTxn(func(r models.Repository) error { - mqb := r.Movie() + withTxn(func(ctx context.Context) error { + mqb := sqlite.MovieReaderWriter name := movieNames[movieIdxWithScene] // find a movie by name - movie, err := mqb.FindByName(name, false) + movie, err := mqb.FindByName(ctx, name, false) if err != nil { t.Errorf("Error finding movies: %s", err.Error()) @@ -32,7 +34,7 @@ func TestMovieFindByName(t *testing.T) { name = movieNames[movieIdxWithDupName] // find a movie by name nocase - movie, err = mqb.FindByName(name, true) + movie, err = mqb.FindByName(ctx, name, true) if err != nil { t.Errorf("Error finding movies: %s", err.Error()) @@ -48,21 +50,21 @@ func TestMovieFindByName(t *testing.T) { } func TestMovieFindByNames(t *testing.T) { - withTxn(func(r models.Repository) error { + withTxn(func(ctx context.Context) error { var names []string - mqb := r.Movie() + mqb := sqlite.MovieReaderWriter names = append(names, movieNames[movieIdxWithScene]) // find movies by names - movies, err := mqb.FindByNames(names, false) + movies, err := mqb.FindByNames(ctx, names, false) if err != nil { t.Errorf("Error finding movies: %s", err.Error()) } assert.Len(t, movies, 1) assert.Equal(t, movieNames[movieIdxWithScene], movies[0].Name.String) - movies, err = mqb.FindByNames(names, true) // find movies by names nocase + movies, err = mqb.FindByNames(ctx, names, true) // find movies by names nocase if err != nil { t.Errorf("Error finding movies: %s", err.Error()) } @@ -75,8 +77,8 @@ func TestMovieFindByNames(t *testing.T) { } func TestMovieQueryStudio(t *testing.T) { - withTxn(func(r models.Repository) error { - mqb := r.Movie() + withTxn(func(ctx context.Context) error { + mqb := sqlite.MovieReaderWriter studioCriterion := models.HierarchicalMultiCriterionInput{ Value: []string{ strconv.Itoa(studioIDs[studioIdxWithMovie]), @@ -88,7 +90,7 @@ func TestMovieQueryStudio(t *testing.T) { Studios: &studioCriterion, } - movies, _, err := mqb.Query(&movieFilter, nil) + movies, _, err := mqb.Query(ctx, &movieFilter, nil) if err != nil { t.Errorf("Error querying movie: %s", err.Error()) } @@ -110,7 +112,7 @@ func TestMovieQueryStudio(t *testing.T) { Q: &q, } - movies, _, err = mqb.Query(&movieFilter, &findFilter) + movies, _, err = mqb.Query(ctx, &movieFilter, &findFilter) if err != nil { t.Errorf("Error querying movie: %s", err.Error()) } @@ -159,11 +161,11 @@ func TestMovieQueryURL(t *testing.T) { } func verifyMovieQuery(t *testing.T, filter models.MovieFilterType, verifyFn func(s *models.Movie)) { - withTxn(func(r models.Repository) error { + withTxn(func(ctx context.Context) error { t.Helper() - sqb := r.Movie() + sqb := sqlite.MovieReaderWriter - movies := queryMovie(t, sqb, &filter, nil) + movies := queryMovie(ctx, t, sqb, &filter, nil) // assume it should find at least one assert.Greater(t, len(movies), 0) @@ -176,8 +178,8 @@ func verifyMovieQuery(t *testing.T, filter models.MovieFilterType, verifyFn func }) } -func queryMovie(t *testing.T, sqb 
models.MovieReader, movieFilter *models.MovieFilterType, findFilter *models.FindFilterType) []*models.Movie { - movies, _, err := sqb.Query(movieFilter, findFilter) +func queryMovie(ctx context.Context, t *testing.T, sqb models.MovieReader, movieFilter *models.MovieFilterType, findFilter *models.FindFilterType) []*models.Movie { + movies, _, err := sqb.Query(ctx, movieFilter, findFilter) if err != nil { t.Errorf("Error querying movie: %s", err.Error()) } @@ -193,9 +195,9 @@ func TestMovieQuerySorting(t *testing.T) { Direction: &direction, } - withTxn(func(r models.Repository) error { - sqb := r.Movie() - movies := queryMovie(t, sqb, nil, &findFilter) + withTxn(func(ctx context.Context) error { + sqb := sqlite.MovieReaderWriter + movies := queryMovie(ctx, t, sqb, nil, &findFilter) // scenes should be in same order as indexes firstMovie := movies[0] @@ -205,7 +207,7 @@ func TestMovieQuerySorting(t *testing.T) { // sort in descending order direction = models.SortDirectionEnumAsc - movies = queryMovie(t, sqb, nil, &findFilter) + movies = queryMovie(ctx, t, sqb, nil, &findFilter) lastMovie := movies[len(movies)-1] assert.Equal(t, movieIDs[movieIdxWithScene], lastMovie.ID) @@ -215,8 +217,8 @@ func TestMovieQuerySorting(t *testing.T) { } func TestMovieUpdateMovieImages(t *testing.T) { - if err := withTxn(func(r models.Repository) error { - mqb := r.Movie() + if err := withTxn(func(ctx context.Context) error { + mqb := sqlite.MovieReaderWriter // create movie to test against const name = "TestMovieUpdateMovieImages" @@ -224,26 +226,26 @@ func TestMovieUpdateMovieImages(t *testing.T) { Name: sql.NullString{String: name, Valid: true}, Checksum: md5.FromString(name), } - created, err := mqb.Create(movie) + created, err := mqb.Create(ctx, movie) if err != nil { return fmt.Errorf("Error creating movie: %s", err.Error()) } frontImage := []byte("frontImage") backImage := []byte("backImage") - err = mqb.UpdateImages(created.ID, frontImage, backImage) + err = mqb.UpdateImages(ctx, created.ID, frontImage, backImage) if err != nil { return fmt.Errorf("Error updating movie images: %s", err.Error()) } // ensure images are set - storedFront, err := mqb.GetFrontImage(created.ID) + storedFront, err := mqb.GetFrontImage(ctx, created.ID) if err != nil { return fmt.Errorf("Error getting front image: %s", err.Error()) } assert.Equal(t, storedFront, frontImage) - storedBack, err := mqb.GetBackImage(created.ID) + storedBack, err := mqb.GetBackImage(ctx, created.ID) if err != nil { return fmt.Errorf("Error getting back image: %s", err.Error()) } @@ -251,26 +253,26 @@ func TestMovieUpdateMovieImages(t *testing.T) { // set front image only newImage := []byte("newImage") - err = mqb.UpdateImages(created.ID, newImage, nil) + err = mqb.UpdateImages(ctx, created.ID, newImage, nil) if err != nil { return fmt.Errorf("Error updating movie images: %s", err.Error()) } - storedFront, err = mqb.GetFrontImage(created.ID) + storedFront, err = mqb.GetFrontImage(ctx, created.ID) if err != nil { return fmt.Errorf("Error getting front image: %s", err.Error()) } assert.Equal(t, storedFront, newImage) // back image should be nil - storedBack, err = mqb.GetBackImage(created.ID) + storedBack, err = mqb.GetBackImage(ctx, created.ID) if err != nil { return fmt.Errorf("Error getting back image: %s", err.Error()) } assert.Nil(t, nil) // set back image only - err = mqb.UpdateImages(created.ID, nil, newImage) + err = mqb.UpdateImages(ctx, created.ID, nil, newImage) if err == nil { return fmt.Errorf("Expected error setting nil front image") } @@ 
-282,8 +284,8 @@ func TestMovieUpdateMovieImages(t *testing.T) { } func TestMovieDestroyMovieImages(t *testing.T) { - if err := withTxn(func(r models.Repository) error { - mqb := r.Movie() + if err := withTxn(func(ctx context.Context) error { + mqb := sqlite.MovieReaderWriter // create movie to test against const name = "TestMovieDestroyMovieImages" @@ -291,32 +293,32 @@ func TestMovieDestroyMovieImages(t *testing.T) { Name: sql.NullString{String: name, Valid: true}, Checksum: md5.FromString(name), } - created, err := mqb.Create(movie) + created, err := mqb.Create(ctx, movie) if err != nil { return fmt.Errorf("Error creating movie: %s", err.Error()) } frontImage := []byte("frontImage") backImage := []byte("backImage") - err = mqb.UpdateImages(created.ID, frontImage, backImage) + err = mqb.UpdateImages(ctx, created.ID, frontImage, backImage) if err != nil { return fmt.Errorf("Error updating movie images: %s", err.Error()) } - err = mqb.DestroyImages(created.ID) + err = mqb.DestroyImages(ctx, created.ID) if err != nil { return fmt.Errorf("Error destroying movie images: %s", err.Error()) } // front image should be nil - storedFront, err := mqb.GetFrontImage(created.ID) + storedFront, err := mqb.GetFrontImage(ctx, created.ID) if err != nil { return fmt.Errorf("Error getting front image: %s", err.Error()) } assert.Nil(t, storedFront) // back image should be nil - storedBack, err := mqb.GetBackImage(created.ID) + storedBack, err := mqb.GetBackImage(ctx, created.ID) if err != nil { return fmt.Errorf("Error getting back image: %s", err.Error()) } diff --git a/pkg/sqlite/performer.go b/pkg/sqlite/performer.go index 142be42ff..6bf42dc18 100644 --- a/pkg/sqlite/performer.go +++ b/pkg/sqlite/performer.go @@ -1,12 +1,16 @@ package sqlite import ( + "context" "database/sql" "errors" "fmt" "strings" + "github.com/doug-martin/goqu/v9" + "github.com/jmoiron/sqlx" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sliceutil/intslice" "github.com/stashapp/stash/pkg/utils" ) @@ -25,66 +29,63 @@ type performerQueryBuilder struct { repository } -func NewPerformerReaderWriter(tx dbi) *performerQueryBuilder { - return &performerQueryBuilder{ - repository{ - tx: tx, - tableName: performerTable, - idColumn: idColumn, - }, - } +var PerformerReaderWriter = &performerQueryBuilder{ + repository{ + tableName: performerTable, + idColumn: idColumn, + }, } -func (qb *performerQueryBuilder) Create(newObject models.Performer) (*models.Performer, error) { +func (qb *performerQueryBuilder) Create(ctx context.Context, newObject models.Performer) (*models.Performer, error) { var ret models.Performer - if err := qb.insertObject(newObject, &ret); err != nil { + if err := qb.insertObject(ctx, newObject, &ret); err != nil { return nil, err } return &ret, nil } -func (qb *performerQueryBuilder) Update(updatedObject models.PerformerPartial) (*models.Performer, error) { +func (qb *performerQueryBuilder) Update(ctx context.Context, updatedObject models.PerformerPartial) (*models.Performer, error) { const partial = true - if err := qb.update(updatedObject.ID, updatedObject, partial); err != nil { + if err := qb.update(ctx, updatedObject.ID, updatedObject, partial); err != nil { return nil, err } var ret models.Performer - if err := qb.get(updatedObject.ID, &ret); err != nil { + if err := qb.getByID(ctx, updatedObject.ID, &ret); err != nil { return nil, err } return &ret, nil } -func (qb *performerQueryBuilder) UpdateFull(updatedObject models.Performer) (*models.Performer, error) { +func (qb *performerQueryBuilder) 
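The test rewrites in this diff are uniform: the withTxn test helper now hands the closure a bare context instead of a models.Repository, and tests reach the store through package-level singletons such as sqlite.MovieReaderWriter. Condensed to its skeleton (a hypothetical test; Count and the helper signature are as shown in the hunks above):

func TestMovieCount(t *testing.T) {
	withTxn(func(ctx context.Context) error {
		count, err := sqlite.MovieReaderWriter.Count(ctx)
		if err != nil {
			t.Errorf("Error counting movies: %s", err.Error())
		}

		assert.GreaterOrEqual(t, count, 0)
		return nil
	})
}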
UpdateFull(ctx context.Context, updatedObject models.Performer) (*models.Performer, error) { const partial = false - if err := qb.update(updatedObject.ID, updatedObject, partial); err != nil { + if err := qb.update(ctx, updatedObject.ID, updatedObject, partial); err != nil { return nil, err } var ret models.Performer - if err := qb.get(updatedObject.ID, &ret); err != nil { + if err := qb.getByID(ctx, updatedObject.ID, &ret); err != nil { return nil, err } return &ret, nil } -func (qb *performerQueryBuilder) Destroy(id int) error { +func (qb *performerQueryBuilder) Destroy(ctx context.Context, id int) error { // TODO - add on delete cascade to performers_scenes - _, err := qb.tx.Exec("DELETE FROM performers_scenes WHERE performer_id = ?", id) + _, err := qb.tx.Exec(ctx, "DELETE FROM performers_scenes WHERE performer_id = ?", id) if err != nil { return err } - return qb.destroyExisting([]int{id}) + return qb.destroyExisting(ctx, []int{id}) } -func (qb *performerQueryBuilder) Find(id int) (*models.Performer, error) { +func (qb *performerQueryBuilder) Find(ctx context.Context, id int) (*models.Performer, error) { var ret models.Performer - if err := qb.get(id, &ret); err != nil { + if err := qb.getByID(ctx, id, &ret); err != nil { if errors.Is(err, sql.ErrNoRows) { return nil, nil } @@ -93,62 +94,86 @@ func (qb *performerQueryBuilder) Find(id int) (*models.Performer, error) { return &ret, nil } -func (qb *performerQueryBuilder) FindMany(ids []int) ([]*models.Performer, error) { - var performers []*models.Performer - for _, id := range ids { - performer, err := qb.Find(id) - if err != nil { - return nil, err - } - - if performer == nil { - return nil, fmt.Errorf("performer with id %d not found", id) - } - - performers = append(performers, performer) +func (qb *performerQueryBuilder) FindMany(ctx context.Context, ids []int) ([]*models.Performer, error) { + tableMgr := performerTableMgr + q := goqu.Select("*").From(tableMgr.table).Where(tableMgr.byIDInts(ids...)) + unsorted, err := qb.getMany(ctx, q) + if err != nil { + return nil, err } - return performers, nil + ret := make([]*models.Performer, len(ids)) + + for _, s := range unsorted { + i := intslice.IntIndex(ids, s.ID) + ret[i] = s + } + + for i := range ret { + if ret[i] == nil { + return nil, fmt.Errorf("performer with id %d not found", ids[i]) + } + } + + return ret, nil } -func (qb *performerQueryBuilder) FindBySceneID(sceneID int) ([]*models.Performer, error) { +func (qb *performerQueryBuilder) getMany(ctx context.Context, q *goqu.SelectDataset) ([]*models.Performer, error) { + const single = false + var ret []*models.Performer + if err := queryFunc(ctx, q, single, func(r *sqlx.Rows) error { + var f models.Performer + if err := r.StructScan(&f); err != nil { + return err + } + + ret = append(ret, &f) + return nil + }); err != nil { + return nil, err + } + + return ret, nil +} + +func (qb *performerQueryBuilder) FindBySceneID(ctx context.Context, sceneID int) ([]*models.Performer, error) { query := selectAll("performers") + ` LEFT JOIN performers_scenes as scenes_join on scenes_join.performer_id = performers.id WHERE scenes_join.scene_id = ? 
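FindMany, both here and in movies.go, builds its query with goqu rather than string concatenation. Datasets are plain values until rendered, which makes the construction easy to inspect in isolation; for example (standalone sketch using goqu's default dialect):

package main

import (
	"fmt"

	"github.com/doug-martin/goqu/v9"
)

func main() {
	q := goqu.Select("*").From("performers").Where(goqu.C("id").In(1, 2, 3))

	sql, args, err := q.ToSQL()
	if err != nil {
		panic(err)
	}

	// goqu interpolates literal values by default, so args is empty and the
	// rendered statement is roughly:
	//   SELECT * FROM "performers" WHERE ("id" IN (1, 2, 3))
	fmt.Println(sql, args)
}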
` args := []interface{}{sceneID} - return qb.queryPerformers(query, args) + return qb.queryPerformers(ctx, query, args) } -func (qb *performerQueryBuilder) FindByImageID(imageID int) ([]*models.Performer, error) { +func (qb *performerQueryBuilder) FindByImageID(ctx context.Context, imageID int) ([]*models.Performer, error) { query := selectAll("performers") + ` LEFT JOIN performers_images as images_join on images_join.performer_id = performers.id WHERE images_join.image_id = ? ` args := []interface{}{imageID} - return qb.queryPerformers(query, args) + return qb.queryPerformers(ctx, query, args) } -func (qb *performerQueryBuilder) FindByGalleryID(galleryID int) ([]*models.Performer, error) { +func (qb *performerQueryBuilder) FindByGalleryID(ctx context.Context, galleryID int) ([]*models.Performer, error) { query := selectAll("performers") + ` LEFT JOIN performers_galleries as galleries_join on galleries_join.performer_id = performers.id WHERE galleries_join.gallery_id = ? ` args := []interface{}{galleryID} - return qb.queryPerformers(query, args) + return qb.queryPerformers(ctx, query, args) } -func (qb *performerQueryBuilder) FindNamesBySceneID(sceneID int) ([]*models.Performer, error) { +func (qb *performerQueryBuilder) FindNamesBySceneID(ctx context.Context, sceneID int) ([]*models.Performer, error) { query := ` SELECT performers.name FROM performers LEFT JOIN performers_scenes as scenes_join on scenes_join.performer_id = performers.id WHERE scenes_join.scene_id = ? ` args := []interface{}{sceneID} - return qb.queryPerformers(query, args) + return qb.queryPerformers(ctx, query, args) } -func (qb *performerQueryBuilder) FindByNames(names []string, nocase bool) ([]*models.Performer, error) { +func (qb *performerQueryBuilder) FindByNames(ctx context.Context, names []string, nocase bool) ([]*models.Performer, error) { query := "SELECT * FROM performers WHERE name" if nocase { query += " COLLATE NOCASE" @@ -159,23 +184,23 @@ func (qb *performerQueryBuilder) FindByNames(names []string, nocase bool) ([]*mo for _, name := range names { args = append(args, name) } - return qb.queryPerformers(query, args) + return qb.queryPerformers(ctx, query, args) } -func (qb *performerQueryBuilder) CountByTagID(tagID int) (int, error) { +func (qb *performerQueryBuilder) CountByTagID(ctx context.Context, tagID int) (int, error) { args := []interface{}{tagID} - return qb.runCountQuery(qb.buildCountQuery(countPerformersForTagQuery), args) + return qb.runCountQuery(ctx, qb.buildCountQuery(countPerformersForTagQuery), args) } -func (qb *performerQueryBuilder) Count() (int, error) { - return qb.runCountQuery(qb.buildCountQuery("SELECT performers.id FROM performers"), nil) +func (qb *performerQueryBuilder) Count(ctx context.Context) (int, error) { + return qb.runCountQuery(ctx, qb.buildCountQuery("SELECT performers.id FROM performers"), nil) } -func (qb *performerQueryBuilder) All() ([]*models.Performer, error) { - return qb.queryPerformers(selectAll("performers")+qb.getPerformerSort(nil), nil) +func (qb *performerQueryBuilder) All(ctx context.Context) ([]*models.Performer, error) { + return qb.queryPerformers(ctx, selectAll("performers")+qb.getPerformerSort(nil), nil) } -func (qb *performerQueryBuilder) QueryForAutoTag(words []string) ([]*models.Performer, error) { +func (qb *performerQueryBuilder) QueryForAutoTag(ctx context.Context, words []string) ([]*models.Performer, error) { // TODO - Query needs to be changed to support queries of this type, and // this method should be removed query := 
selectAll(performerTable) @@ -196,7 +221,7 @@ func (qb *performerQueryBuilder) QueryForAutoTag(words []string) ([]*models.Perf "ignore_auto_tag = 0", whereOr, }, " AND ") - return qb.queryPerformers(query+" WHERE "+where, args) + return qb.queryPerformers(ctx, query+" WHERE "+where, args) } func (qb *performerQueryBuilder) validateFilter(filter *models.PerformerFilterType) error { @@ -230,74 +255,74 @@ func (qb *performerQueryBuilder) validateFilter(filter *models.PerformerFilterTy return nil } -func (qb *performerQueryBuilder) makeFilter(filter *models.PerformerFilterType) *filterBuilder { +func (qb *performerQueryBuilder) makeFilter(ctx context.Context, filter *models.PerformerFilterType) *filterBuilder { query := &filterBuilder{} if filter.And != nil { - query.and(qb.makeFilter(filter.And)) + query.and(qb.makeFilter(ctx, filter.And)) } if filter.Or != nil { - query.or(qb.makeFilter(filter.Or)) + query.or(qb.makeFilter(ctx, filter.Or)) } if filter.Not != nil { - query.not(qb.makeFilter(filter.Not)) + query.not(qb.makeFilter(ctx, filter.Not)) } const tableName = performerTable - query.handleCriterion(stringCriterionHandler(filter.Name, tableName+".name")) - query.handleCriterion(stringCriterionHandler(filter.Details, tableName+".details")) + query.handleCriterion(ctx, stringCriterionHandler(filter.Name, tableName+".name")) + query.handleCriterion(ctx, stringCriterionHandler(filter.Details, tableName+".details")) - query.handleCriterion(boolCriterionHandler(filter.FilterFavorites, tableName+".favorite")) - query.handleCriterion(boolCriterionHandler(filter.IgnoreAutoTag, tableName+".ignore_auto_tag")) + query.handleCriterion(ctx, boolCriterionHandler(filter.FilterFavorites, tableName+".favorite", nil)) + query.handleCriterion(ctx, boolCriterionHandler(filter.IgnoreAutoTag, tableName+".ignore_auto_tag", nil)) - query.handleCriterion(yearFilterCriterionHandler(filter.BirthYear, tableName+".birthdate")) - query.handleCriterion(yearFilterCriterionHandler(filter.DeathYear, tableName+".death_date")) + query.handleCriterion(ctx, yearFilterCriterionHandler(filter.BirthYear, tableName+".birthdate")) + query.handleCriterion(ctx, yearFilterCriterionHandler(filter.DeathYear, tableName+".death_date")) - query.handleCriterion(performerAgeFilterCriterionHandler(filter.Age)) + query.handleCriterion(ctx, performerAgeFilterCriterionHandler(filter.Age)) - query.handleCriterion(criterionHandlerFunc(func(f *filterBuilder) { + query.handleCriterion(ctx, criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) { if gender := filter.Gender; gender != nil { f.addWhere(tableName+".gender = ?", gender.Value.String()) } })) - query.handleCriterion(performerIsMissingCriterionHandler(qb, filter.IsMissing)) - query.handleCriterion(stringCriterionHandler(filter.Ethnicity, tableName+".ethnicity")) - query.handleCriterion(stringCriterionHandler(filter.Country, tableName+".country")) - query.handleCriterion(stringCriterionHandler(filter.EyeColor, tableName+".eye_color")) - query.handleCriterion(stringCriterionHandler(filter.Height, tableName+".height")) - query.handleCriterion(stringCriterionHandler(filter.Measurements, tableName+".measurements")) - query.handleCriterion(stringCriterionHandler(filter.FakeTits, tableName+".fake_tits")) - query.handleCriterion(stringCriterionHandler(filter.CareerLength, tableName+".career_length")) - query.handleCriterion(stringCriterionHandler(filter.Tattoos, tableName+".tattoos")) - query.handleCriterion(stringCriterionHandler(filter.Piercings, tableName+".piercings")) - 
query.handleCriterion(intCriterionHandler(filter.Rating, tableName+".rating")) - query.handleCriterion(stringCriterionHandler(filter.HairColor, tableName+".hair_color")) - query.handleCriterion(stringCriterionHandler(filter.URL, tableName+".url")) - query.handleCriterion(intCriterionHandler(filter.Weight, tableName+".weight")) - query.handleCriterion(criterionHandlerFunc(func(f *filterBuilder) { + query.handleCriterion(ctx, performerIsMissingCriterionHandler(qb, filter.IsMissing)) + query.handleCriterion(ctx, stringCriterionHandler(filter.Ethnicity, tableName+".ethnicity")) + query.handleCriterion(ctx, stringCriterionHandler(filter.Country, tableName+".country")) + query.handleCriterion(ctx, stringCriterionHandler(filter.EyeColor, tableName+".eye_color")) + query.handleCriterion(ctx, stringCriterionHandler(filter.Height, tableName+".height")) + query.handleCriterion(ctx, stringCriterionHandler(filter.Measurements, tableName+".measurements")) + query.handleCriterion(ctx, stringCriterionHandler(filter.FakeTits, tableName+".fake_tits")) + query.handleCriterion(ctx, stringCriterionHandler(filter.CareerLength, tableName+".career_length")) + query.handleCriterion(ctx, stringCriterionHandler(filter.Tattoos, tableName+".tattoos")) + query.handleCriterion(ctx, stringCriterionHandler(filter.Piercings, tableName+".piercings")) + query.handleCriterion(ctx, intCriterionHandler(filter.Rating, tableName+".rating", nil)) + query.handleCriterion(ctx, stringCriterionHandler(filter.HairColor, tableName+".hair_color")) + query.handleCriterion(ctx, stringCriterionHandler(filter.URL, tableName+".url")) + query.handleCriterion(ctx, intCriterionHandler(filter.Weight, tableName+".weight", nil)) + query.handleCriterion(ctx, criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) { if filter.StashID != nil { qb.stashIDRepository().join(f, "performer_stash_ids", "performers.id") - stringCriterionHandler(filter.StashID, "performer_stash_ids.stash_id")(f) + stringCriterionHandler(filter.StashID, "performer_stash_ids.stash_id")(ctx, f) } })) // TODO - need better handling of aliases - query.handleCriterion(stringCriterionHandler(filter.Aliases, tableName+".aliases")) + query.handleCriterion(ctx, stringCriterionHandler(filter.Aliases, tableName+".aliases")) - query.handleCriterion(performerTagsCriterionHandler(qb, filter.Tags)) + query.handleCriterion(ctx, performerTagsCriterionHandler(qb, filter.Tags)) - query.handleCriterion(performerStudiosCriterionHandler(qb, filter.Studios)) + query.handleCriterion(ctx, performerStudiosCriterionHandler(qb, filter.Studios)) - query.handleCriterion(performerTagCountCriterionHandler(qb, filter.TagCount)) - query.handleCriterion(performerSceneCountCriterionHandler(qb, filter.SceneCount)) - query.handleCriterion(performerImageCountCriterionHandler(qb, filter.ImageCount)) - query.handleCriterion(performerGalleryCountCriterionHandler(qb, filter.GalleryCount)) + query.handleCriterion(ctx, performerTagCountCriterionHandler(qb, filter.TagCount)) + query.handleCriterion(ctx, performerSceneCountCriterionHandler(qb, filter.SceneCount)) + query.handleCriterion(ctx, performerImageCountCriterionHandler(qb, filter.ImageCount)) + query.handleCriterion(ctx, performerGalleryCountCriterionHandler(qb, filter.GalleryCount)) return query } -func (qb *performerQueryBuilder) Query(performerFilter *models.PerformerFilterType, findFilter *models.FindFilterType) ([]*models.Performer, int, error) { +func (qb *performerQueryBuilder) Query(ctx context.Context, performerFilter *models.PerformerFilterType, 
findFilter *models.FindFilterType) ([]*models.Performer, int, error) { if performerFilter == nil { performerFilter = &models.PerformerFilterType{} } @@ -316,30 +341,26 @@ func (qb *performerQueryBuilder) Query(performerFilter *models.PerformerFilterTy if err := qb.validateFilter(performerFilter); err != nil { return nil, 0, err } - filter := qb.makeFilter(performerFilter) + filter := qb.makeFilter(ctx, performerFilter) query.addFilter(filter) query.sortAndPagination = qb.getPerformerSort(findFilter) + getPagination(findFilter) - idsResult, countResult, err := query.executeFind() + idsResult, countResult, err := query.executeFind(ctx) if err != nil { return nil, 0, err } - var performers []*models.Performer - for _, id := range idsResult { - performer, err := qb.Find(id) - if err != nil { - return nil, 0, err - } - performers = append(performers, performer) + performers, err := qb.FindMany(ctx, idsResult) + if err != nil { + return nil, 0, err } return performers, countResult, nil } func performerIsMissingCriterionHandler(qb *performerQueryBuilder, isMissing *string) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if isMissing != nil && *isMissing != "" { switch *isMissing { case "scenes": // Deprecated: use `scene_count == 0` filter instead @@ -359,7 +380,7 @@ func performerIsMissingCriterionHandler(qb *performerQueryBuilder, isMissing *st } func yearFilterCriterionHandler(year *models.IntCriterionInput, col string) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if year != nil && year.Modifier.IsValid() { clause, args := getIntCriterionWhereClause("cast(strftime('%Y', "+col+") as int)", *year) f.addWhere(clause, args...) @@ -368,7 +389,7 @@ func yearFilterCriterionHandler(year *models.IntCriterionInput, col string) crit } func performerAgeFilterCriterionHandler(age *models.IntCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if age != nil && age.Modifier.IsValid() { clause, args := getIntCriterionWhereClause( "cast(IFNULL(strftime('%Y.%m%d', performers.death_date), strftime('%Y.%m%d', 'now')) - strftime('%Y.%m%d', performers.birthdate) as int)", @@ -437,7 +458,7 @@ func performerGalleryCountCriterionHandler(qb *performerQueryBuilder, count *mod } func performerStudiosCriterionHandler(qb *performerQueryBuilder, studios *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if studios != nil { formatMaps := []utils.StrFormatMap{ { @@ -493,7 +514,7 @@ func performerStudiosCriterionHandler(qb *performerQueryBuilder, studios *models } const derivedPerformerStudioTable = "performer_studio" - valuesClause := getHierarchicalValues(qb.tx, studios.Value, studioTable, "", "parent_id", studios.Depth) + valuesClause := getHierarchicalValues(ctx, qb.tx, studios.Value, studioTable, "", "parent_id", studios.Depth) f.addWith("studio(root_id, item_id) AS (" + valuesClause + ")") templStr := `SELECT performer_id FROM {primaryTable} @@ -540,9 +561,9 @@ func (qb *performerQueryBuilder) getPerformerSort(findFilter *models.FindFilterT return getSort(sort, direction, "performers") } -func (qb *performerQueryBuilder) queryPerformers(query string, args []interface{}) ([]*models.Performer, error) { +func (qb *performerQueryBuilder) queryPerformers(ctx context.Context, query string, args []interface{}) 
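performerAgeFilterCriterionHandler above computes age entirely in SQL with a small trick: strftime('%Y.%m%d', d) renders a date as a number like 1990.0615, so subtracting two such values and truncating to int yields whole years while correctly accounting for whether the birthday has passed. A runnable check of the arithmetic, using the in-memory SQLite driver already in go.mod:

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	var age int
	// born 1990-06-15, measured on 2020-06-14: the birthday has not yet
	// passed, so 2020.0614 - 1990.0615 = 29.9999, truncated to 29.
	row := db.QueryRow(`SELECT cast(strftime('%Y.%m%d', '2020-06-14') - strftime('%Y.%m%d', '1990-06-15') as int)`)
	if err := row.Scan(&age); err != nil {
		panic(err)
	}

	fmt.Println(age) // 29
}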
([]*models.Performer, error) { var ret models.Performers - if err := qb.query(query, args, &ret); err != nil { + if err := qb.query(ctx, query, args, &ret); err != nil { return nil, err } @@ -560,13 +581,13 @@ func (qb *performerQueryBuilder) tagsRepository() *joinRepository { } } -func (qb *performerQueryBuilder) GetTagIDs(id int) ([]int, error) { - return qb.tagsRepository().getIDs(id) +func (qb *performerQueryBuilder) GetTagIDs(ctx context.Context, id int) ([]int, error) { + return qb.tagsRepository().getIDs(ctx, id) } -func (qb *performerQueryBuilder) UpdateTags(id int, tagIDs []int) error { +func (qb *performerQueryBuilder) UpdateTags(ctx context.Context, id int, tagIDs []int) error { // Delete the existing joins and then create new ones - return qb.tagsRepository().replace(id, tagIDs) + return qb.tagsRepository().replace(ctx, id, tagIDs) } func (qb *performerQueryBuilder) imageRepository() *imageRepository { @@ -580,16 +601,16 @@ func (qb *performerQueryBuilder) imageRepository() *imageRepository { } } -func (qb *performerQueryBuilder) GetImage(performerID int) ([]byte, error) { - return qb.imageRepository().get(performerID) +func (qb *performerQueryBuilder) GetImage(ctx context.Context, performerID int) ([]byte, error) { + return qb.imageRepository().get(ctx, performerID) } -func (qb *performerQueryBuilder) UpdateImage(performerID int, image []byte) error { - return qb.imageRepository().replace(performerID, image) +func (qb *performerQueryBuilder) UpdateImage(ctx context.Context, performerID int, image []byte) error { + return qb.imageRepository().replace(ctx, performerID, image) } -func (qb *performerQueryBuilder) DestroyImage(performerID int) error { - return qb.imageRepository().destroy([]int{performerID}) +func (qb *performerQueryBuilder) DestroyImage(ctx context.Context, performerID int) error { + return qb.imageRepository().destroy(ctx, []int{performerID}) } func (qb *performerQueryBuilder) stashIDRepository() *stashIDRepository { @@ -602,25 +623,25 @@ func (qb *performerQueryBuilder) stashIDRepository() *stashIDRepository { } } -func (qb *performerQueryBuilder) GetStashIDs(performerID int) ([]*models.StashID, error) { - return qb.stashIDRepository().get(performerID) +func (qb *performerQueryBuilder) GetStashIDs(ctx context.Context, performerID int) ([]models.StashID, error) { + return qb.stashIDRepository().get(ctx, performerID) } -func (qb *performerQueryBuilder) UpdateStashIDs(performerID int, stashIDs []models.StashID) error { - return qb.stashIDRepository().replace(performerID, stashIDs) +func (qb *performerQueryBuilder) UpdateStashIDs(ctx context.Context, performerID int, stashIDs []models.StashID) error { + return qb.stashIDRepository().replace(ctx, performerID, stashIDs) } -func (qb *performerQueryBuilder) FindByStashID(stashID models.StashID) ([]*models.Performer, error) { +func (qb *performerQueryBuilder) FindByStashID(ctx context.Context, stashID models.StashID) ([]*models.Performer, error) { query := selectAll("performers") + ` LEFT JOIN performer_stash_ids on performer_stash_ids.performer_id = performers.id WHERE performer_stash_ids.stash_id = ? AND performer_stash_ids.endpoint = ? 
` args := []interface{}{stashID.StashID, stashID.Endpoint} - return qb.queryPerformers(query, args) + return qb.queryPerformers(ctx, query, args) } -func (qb *performerQueryBuilder) FindByStashIDStatus(hasStashID bool, stashboxEndpoint string) ([]*models.Performer, error) { +func (qb *performerQueryBuilder) FindByStashIDStatus(ctx context.Context, hasStashID bool, stashboxEndpoint string) ([]*models.Performer, error) { query := selectAll("performers") + ` LEFT JOIN performer_stash_ids on performer_stash_ids.performer_id = performers.id ` @@ -637,5 +658,5 @@ func (qb *performerQueryBuilder) FindByStashIDStatus(hasStashID bool, stashboxEn } args := []interface{}{stashboxEndpoint} - return qb.queryPerformers(query, args) + return qb.queryPerformers(ctx, query, args) } diff --git a/pkg/sqlite/performer_test.go b/pkg/sqlite/performer_test.go index a6839f573..2075407a5 100644 --- a/pkg/sqlite/performer_test.go +++ b/pkg/sqlite/performer_test.go @@ -4,6 +4,7 @@ package sqlite_test import ( + "context" "database/sql" "fmt" "math" @@ -16,14 +17,15 @@ import ( "github.com/stashapp/stash/pkg/hash/md5" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sqlite" ) func TestPerformerFindBySceneID(t *testing.T) { - withTxn(func(r models.Repository) error { - pqb := r.Performer() + withTxn(func(ctx context.Context) error { + pqb := sqlite.PerformerReaderWriter sceneID := sceneIDs[sceneIdxWithPerformer] - performers, err := pqb.FindBySceneID(sceneID) + performers, err := pqb.FindBySceneID(ctx, sceneID) if err != nil { t.Errorf("Error finding performer: %s", err.Error()) @@ -34,7 +36,7 @@ func TestPerformerFindBySceneID(t *testing.T) { assert.Equal(t, getPerformerStringValue(performerIdxWithScene, "Name"), performer.Name.String) - performers, err = pqb.FindBySceneID(0) + performers, err = pqb.FindBySceneID(ctx, 0) if err != nil { t.Errorf("Error finding performer: %s", err.Error()) @@ -55,21 +57,21 @@ func TestPerformerFindByNames(t *testing.T) { return ret } - withTxn(func(r models.Repository) error { + withTxn(func(ctx context.Context) error { var names []string - pqb := r.Performer() + pqb := sqlite.PerformerReaderWriter names = append(names, performerNames[performerIdxWithScene]) // find performers by names - performers, err := pqb.FindByNames(names, false) + performers, err := pqb.FindByNames(ctx, names, false) if err != nil { t.Errorf("Error finding performers: %s", err.Error()) } assert.Len(t, performers, 1) assert.Equal(t, performerNames[performerIdxWithScene], performers[0].Name.String) - performers, err = pqb.FindByNames(names, true) // find performers by names nocase + performers, err = pqb.FindByNames(ctx, names, true) // find performers by names nocase if err != nil { t.Errorf("Error finding performers: %s", err.Error()) } @@ -79,14 +81,14 @@ func TestPerformerFindByNames(t *testing.T) { names = append(names, performerNames[performerIdx1WithScene]) // find performers by names ( 2 names ) - performers, err = pqb.FindByNames(names, false) + performers, err = pqb.FindByNames(ctx, names, false) if err != nil { t.Errorf("Error finding performers: %s", err.Error()) } retNames := getNames(performers) assert.Equal(t, names, retNames) - performers, err = pqb.FindByNames(names, true) // find performers by names ( 2 names nocase) + performers, err = pqb.FindByNames(ctx, names, true) // find performers by names ( 2 names nocase) if err != nil { t.Errorf("Error finding performers: %s", err.Error()) } @@ -122,10 +124,10 @@ func TestPerformerQueryEthnicityOr(t *testing.T) { }, } - 
withTxn(func(r models.Repository) error { - sqb := r.Performer() + withTxn(func(ctx context.Context) error { + sqb := sqlite.PerformerReaderWriter - performers := queryPerformers(t, sqb, &performerFilter, nil) + performers := queryPerformers(ctx, t, sqb, &performerFilter, nil) assert.Len(t, performers, 2) assert.Equal(t, performer1Eth, performers[0].Ethnicity.String) @@ -153,10 +155,10 @@ func TestPerformerQueryEthnicityAndRating(t *testing.T) { }, } - withTxn(func(r models.Repository) error { - sqb := r.Performer() + withTxn(func(ctx context.Context) error { + sqb := sqlite.PerformerReaderWriter - performers := queryPerformers(t, sqb, &performerFilter, nil) + performers := queryPerformers(ctx, t, sqb, &performerFilter, nil) assert.Len(t, performers, 1) assert.Equal(t, performerEth, performers[0].Ethnicity.String) @@ -188,10 +190,10 @@ func TestPerformerQueryEthnicityNotRating(t *testing.T) { }, } - withTxn(func(r models.Repository) error { - sqb := r.Performer() + withTxn(func(ctx context.Context) error { + sqb := sqlite.PerformerReaderWriter - performers := queryPerformers(t, sqb, &performerFilter, nil) + performers := queryPerformers(ctx, t, sqb, &performerFilter, nil) for _, performer := range performers { verifyString(t, performer.Ethnicity.String, ethCriterion) @@ -219,20 +221,20 @@ func TestPerformerIllegalQuery(t *testing.T) { Or: &subFilter, } - withTxn(func(r models.Repository) error { - sqb := r.Performer() + withTxn(func(ctx context.Context) error { + sqb := sqlite.PerformerReaderWriter - _, _, err := sqb.Query(performerFilter, nil) + _, _, err := sqb.Query(ctx, performerFilter, nil) assert.NotNil(err) performerFilter.Or = nil performerFilter.Not = &subFilter - _, _, err = sqb.Query(performerFilter, nil) + _, _, err = sqb.Query(ctx, performerFilter, nil) assert.NotNil(err) performerFilter.And = nil performerFilter.Or = &subFilter - _, _, err = sqb.Query(performerFilter, nil) + _, _, err = sqb.Query(ctx, performerFilter, nil) assert.NotNil(err) return nil @@ -240,15 +242,15 @@ func TestPerformerIllegalQuery(t *testing.T) { } func TestPerformerQueryIgnoreAutoTag(t *testing.T) { - withTxn(func(r models.Repository) error { + withTxn(func(ctx context.Context) error { ignoreAutoTag := true performerFilter := models.PerformerFilterType{ IgnoreAutoTag: &ignoreAutoTag, } - sqb := r.Performer() + sqb := sqlite.PerformerReaderWriter - performers := queryPerformers(t, sqb, &performerFilter, nil) + performers := queryPerformers(ctx, t, sqb, &performerFilter, nil) assert.Len(t, performers, int(math.Ceil(float64(totalPerformers)/5))) for _, p := range performers { @@ -260,12 +262,12 @@ func TestPerformerQueryIgnoreAutoTag(t *testing.T) { } func TestPerformerQueryForAutoTag(t *testing.T) { - withTxn(func(r models.Repository) error { - tqb := r.Performer() + withTxn(func(ctx context.Context) error { + tqb := sqlite.PerformerReaderWriter name := performerNames[performerIdx1WithScene] // find a performer by name - performers, err := tqb.QueryForAutoTag([]string{name}) + performers, err := tqb.QueryForAutoTag(ctx, []string{name}) if err != nil { t.Errorf("Error finding performers: %s", err.Error()) @@ -280,8 +282,8 @@ func TestPerformerQueryForAutoTag(t *testing.T) { } func TestPerformerUpdatePerformerImage(t *testing.T) { - if err := withTxn(func(r models.Repository) error { - qb := r.Performer() + if err := withTxn(func(ctx context.Context) error { + qb := sqlite.PerformerReaderWriter // create performer to test against const name = "TestPerformerUpdatePerformerImage" @@ -290,26 +292,26 @@ func 
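TestPerformerIllegalQuery above pins down validateFilter's contract: a filter may set at most one of And/Or/Not at each nesting level, and Query must fail otherwise. The validation body itself is elided from the hunks in this diff, so the following is only a plausible reconstruction of the core check, not the actual implementation:

package sketch

import "errors"

// validateSubFilters mirrors the contract exercised by
// TestPerformerIllegalQuery: and/or/not are mutually exclusive per level.
func validateSubFilters(and, or, not bool) error {
	set := 0
	for _, b := range []bool{and, or, not} {
		if b {
			set++
		}
	}
	if set > 1 {
		return errors.New("cannot combine and, or and not at the same filter level")
	}
	return nil
}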
TestPerformerUpdatePerformerImage(t *testing.T) { Checksum: md5.FromString(name), Favorite: sql.NullBool{Bool: false, Valid: true}, } - created, err := qb.Create(performer) + created, err := qb.Create(ctx, performer) if err != nil { return fmt.Errorf("Error creating performer: %s", err.Error()) } image := []byte("image") - err = qb.UpdateImage(created.ID, image) + err = qb.UpdateImage(ctx, created.ID, image) if err != nil { return fmt.Errorf("Error updating performer image: %s", err.Error()) } // ensure image set - storedImage, err := qb.GetImage(created.ID) + storedImage, err := qb.GetImage(ctx, created.ID) if err != nil { return fmt.Errorf("Error getting image: %s", err.Error()) } assert.Equal(t, storedImage, image) // set nil image - err = qb.UpdateImage(created.ID, nil) + err = qb.UpdateImage(ctx, created.ID, nil) if err == nil { return fmt.Errorf("Expected error setting nil image") } @@ -321,8 +323,8 @@ func TestPerformerUpdatePerformerImage(t *testing.T) { } func TestPerformerDestroyPerformerImage(t *testing.T) { - if err := withTxn(func(r models.Repository) error { - qb := r.Performer() + if err := withTxn(func(ctx context.Context) error { + qb := sqlite.PerformerReaderWriter // create performer to test against const name = "TestPerformerDestroyPerformerImage" @@ -331,24 +333,24 @@ func TestPerformerDestroyPerformerImage(t *testing.T) { Checksum: md5.FromString(name), Favorite: sql.NullBool{Bool: false, Valid: true}, } - created, err := qb.Create(performer) + created, err := qb.Create(ctx, performer) if err != nil { return fmt.Errorf("Error creating performer: %s", err.Error()) } image := []byte("image") - err = qb.UpdateImage(created.ID, image) + err = qb.UpdateImage(ctx, created.ID, image) if err != nil { return fmt.Errorf("Error updating performer image: %s", err.Error()) } - err = qb.DestroyImage(created.ID) + err = qb.DestroyImage(ctx, created.ID) if err != nil { return fmt.Errorf("Error destroying performer image: %s", err.Error()) } // image should be nil - storedImage, err := qb.GetImage(created.ID) + storedImage, err := qb.GetImage(ctx, created.ID) if err != nil { return fmt.Errorf("Error getting image: %s", err.Error()) } @@ -380,13 +382,13 @@ func TestPerformerQueryAge(t *testing.T) { } func verifyPerformerAge(t *testing.T, ageCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - qb := r.Performer() + withTxn(func(ctx context.Context) error { + qb := sqlite.PerformerReaderWriter performerFilter := models.PerformerFilterType{ Age: &ageCriterion, } - performers, _, err := qb.Query(&performerFilter, nil) + performers, _, err := qb.Query(ctx, &performerFilter, nil) if err != nil { t.Errorf("Error querying performer: %s", err.Error()) } @@ -433,13 +435,13 @@ func TestPerformerQueryCareerLength(t *testing.T) { } func verifyPerformerCareerLength(t *testing.T, criterion models.StringCriterionInput) { - withTxn(func(r models.Repository) error { - qb := r.Performer() + withTxn(func(ctx context.Context) error { + qb := sqlite.PerformerReaderWriter performerFilter := models.PerformerFilterType{ CareerLength: &criterion, } - performers, _, err := qb.Query(&performerFilter, nil) + performers, _, err := qb.Query(ctx, &performerFilter, nil) if err != nil { t.Errorf("Error querying performer: %s", err.Error()) } @@ -492,11 +494,11 @@ func TestPerformerQueryURL(t *testing.T) { } func verifyPerformerQuery(t *testing.T, filter models.PerformerFilterType, verifyFn func(s *models.Performer)) { - withTxn(func(r models.Repository) error { + withTxn(func(ctx 
context.Context) error { t.Helper() - sqb := r.Performer() + sqb := sqlite.PerformerReaderWriter - performers := queryPerformers(t, sqb, &filter, nil) + performers := queryPerformers(ctx, t, sqb, &filter, nil) // assume it should find at least one assert.Greater(t, len(performers), 0) @@ -509,8 +511,8 @@ func verifyPerformerQuery(t *testing.T, filter models.PerformerFilterType, verif }) } -func queryPerformers(t *testing.T, qb models.PerformerReader, performerFilter *models.PerformerFilterType, findFilter *models.FindFilterType) []*models.Performer { - performers, _, err := qb.Query(performerFilter, findFilter) +func queryPerformers(ctx context.Context, t *testing.T, qb models.PerformerReader, performerFilter *models.PerformerFilterType, findFilter *models.FindFilterType) []*models.Performer { + performers, _, err := qb.Query(ctx, performerFilter, findFilter) if err != nil { t.Errorf("Error querying performers: %s", err.Error()) } @@ -519,8 +521,8 @@ func queryPerformers(t *testing.T, qb models.PerformerReader, performerFilter *m } func TestPerformerQueryTags(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Performer() + withTxn(func(ctx context.Context) error { + sqb := sqlite.PerformerReaderWriter tagCriterion := models.HierarchicalMultiCriterionInput{ Value: []string{ strconv.Itoa(tagIDs[tagIdxWithPerformer]), @@ -534,7 +536,7 @@ func TestPerformerQueryTags(t *testing.T) { } // ensure ids are correct - performers := queryPerformers(t, sqb, &performerFilter, nil) + performers := queryPerformers(ctx, t, sqb, &performerFilter, nil) assert.Len(t, performers, 2) for _, performer := range performers { assert.True(t, performer.ID == performerIDs[performerIdxWithTag] || performer.ID == performerIDs[performerIdxWithTwoTags]) @@ -548,7 +550,7 @@ func TestPerformerQueryTags(t *testing.T) { Modifier: models.CriterionModifierIncludesAll, } - performers = queryPerformers(t, sqb, &performerFilter, nil) + performers = queryPerformers(ctx, t, sqb, &performerFilter, nil) assert.Len(t, performers, 1) assert.Equal(t, sceneIDs[performerIdxWithTwoTags], performers[0].ID) @@ -565,7 +567,7 @@ func TestPerformerQueryTags(t *testing.T) { Q: &q, } - performers = queryPerformers(t, sqb, &performerFilter, &findFilter) + performers = queryPerformers(ctx, t, sqb, &performerFilter, &findFilter) assert.Len(t, performers, 0) return nil @@ -592,17 +594,17 @@ func TestPerformerQueryTagCount(t *testing.T) { } func verifyPerformersTagCount(t *testing.T, tagCountCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - sqb := r.Performer() + withTxn(func(ctx context.Context) error { + sqb := sqlite.PerformerReaderWriter performerFilter := models.PerformerFilterType{ TagCount: &tagCountCriterion, } - performers := queryPerformers(t, sqb, &performerFilter, nil) + performers := queryPerformers(ctx, t, sqb, &performerFilter, nil) assert.Greater(t, len(performers), 0) for _, performer := range performers { - ids, err := sqb.GetTagIDs(performer.ID) + ids, err := sqb.GetTagIDs(ctx, performer.ID) if err != nil { return err } @@ -633,17 +635,17 @@ func TestPerformerQuerySceneCount(t *testing.T) { } func verifyPerformersSceneCount(t *testing.T, sceneCountCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - sqb := r.Performer() + withTxn(func(ctx context.Context) error { + sqb := sqlite.PerformerReaderWriter performerFilter := models.PerformerFilterType{ SceneCount: &sceneCountCriterion, } - performers := queryPerformers(t, sqb, &performerFilter, nil) + 
performers := queryPerformers(ctx, t, sqb, &performerFilter, nil) assert.Greater(t, len(performers), 0) for _, performer := range performers { - ids, err := r.Scene().FindByPerformerID(performer.ID) + ids, err := db.Scene.FindByPerformerID(ctx, performer.ID) if err != nil { return err } @@ -674,19 +676,19 @@ func TestPerformerQueryImageCount(t *testing.T) { } func verifyPerformersImageCount(t *testing.T, imageCountCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - sqb := r.Performer() + withTxn(func(ctx context.Context) error { + sqb := sqlite.PerformerReaderWriter performerFilter := models.PerformerFilterType{ ImageCount: &imageCountCriterion, } - performers := queryPerformers(t, sqb, &performerFilter, nil) + performers := queryPerformers(ctx, t, sqb, &performerFilter, nil) assert.Greater(t, len(performers), 0) for _, performer := range performers { pp := 0 - result, err := r.Image().Query(models.ImageQueryOptions{ + result, err := db.Image.Query(ctx, models.ImageQueryOptions{ QueryOptions: models.QueryOptions{ FindFilter: &models.FindFilterType{ PerPage: &pp, @@ -730,19 +732,19 @@ func TestPerformerQueryGalleryCount(t *testing.T) { } func verifyPerformersGalleryCount(t *testing.T, galleryCountCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - sqb := r.Performer() + withTxn(func(ctx context.Context) error { + sqb := sqlite.PerformerReaderWriter performerFilter := models.PerformerFilterType{ GalleryCount: &galleryCountCriterion, } - performers := queryPerformers(t, sqb, &performerFilter, nil) + performers := queryPerformers(ctx, t, sqb, &performerFilter, nil) assert.Greater(t, len(performers), 0) for _, performer := range performers { pp := 0 - _, count, err := r.Gallery().Query(&models.GalleryFilterType{ + _, count, err := db.Gallery.Query(ctx, &models.GalleryFilterType{ Performers: &models.MultiCriterionInput{ Value: []string{strconv.Itoa(performer.ID)}, Modifier: models.CriterionModifierIncludes, @@ -761,7 +763,7 @@ func verifyPerformersGalleryCount(t *testing.T, galleryCountCriterion models.Int } func TestPerformerQueryStudio(t *testing.T) { - withTxn(func(r models.Repository) error { + withTxn(func(ctx context.Context) error { testCases := []struct { studioIndex int performerIndex int @@ -771,7 +773,7 @@ func TestPerformerQueryStudio(t *testing.T) { {studioIndex: studioIdxWithGalleryPerformer, performerIndex: performerIdxWithGalleryStudio}, } - sqb := r.Performer() + sqb := sqlite.PerformerReaderWriter for _, tc := range testCases { studioCriterion := models.HierarchicalMultiCriterionInput{ @@ -785,7 +787,7 @@ func TestPerformerQueryStudio(t *testing.T) { Studios: &studioCriterion, } - performers := queryPerformers(t, sqb, &performerFilter, nil) + performers := queryPerformers(ctx, t, sqb, &performerFilter, nil) assert.Len(t, performers, 1) @@ -804,7 +806,7 @@ func TestPerformerQueryStudio(t *testing.T) { Q: &q, } - performers = queryPerformers(t, sqb, &performerFilter, &findFilter) + performers = queryPerformers(ctx, t, sqb, &performerFilter, &findFilter) assert.Len(t, performers, 0) } @@ -819,21 +821,21 @@ func TestPerformerQueryStudio(t *testing.T) { Q: &q, } - performers := queryPerformers(t, sqb, performerFilter, findFilter) + performers := queryPerformers(ctx, t, sqb, performerFilter, findFilter) assert.Len(t, performers, 1) assert.Equal(t, imageIDs[performerIdx1WithImage], performers[0].ID) q = getPerformerStringValue(performerIdxWithSceneStudio, "Name") - performers = queryPerformers(t, sqb, performerFilter, 
findFilter) + performers = queryPerformers(ctx, t, sqb, performerFilter, findFilter) assert.Len(t, performers, 0) performerFilter.Studios.Modifier = models.CriterionModifierNotNull - performers = queryPerformers(t, sqb, performerFilter, findFilter) + performers = queryPerformers(ctx, t, sqb, performerFilter, findFilter) assert.Len(t, performers, 1) assert.Equal(t, imageIDs[performerIdxWithSceneStudio], performers[0].ID) q = getPerformerStringValue(performerIdx1WithImage, "Name") - performers = queryPerformers(t, sqb, performerFilter, findFilter) + performers = queryPerformers(ctx, t, sqb, performerFilter, findFilter) assert.Len(t, performers, 0) return nil @@ -841,8 +843,8 @@ func TestPerformerQueryStudio(t *testing.T) { } func TestPerformerStashIDs(t *testing.T) { - if err := withTxn(func(r models.Repository) error { - qb := r.Performer() + if err := withTxn(func(ctx context.Context) error { + qb := sqlite.PerformerReaderWriter // create performer to test against const name = "TestStashIDs" @@ -851,12 +853,12 @@ func TestPerformerStashIDs(t *testing.T) { Checksum: md5.FromString(name), Favorite: sql.NullBool{Bool: false, Valid: true}, } - created, err := qb.Create(performer) + created, err := qb.Create(ctx, performer) if err != nil { return fmt.Errorf("Error creating performer: %s", err.Error()) } - testStashIDReaderWriter(t, qb, created.ID) + testStashIDReaderWriter(ctx, t, qb, created.ID) return nil }); err != nil { t.Error(err.Error()) @@ -888,13 +890,13 @@ func TestPerformerQueryRating(t *testing.T) { } func verifyPerformersRating(t *testing.T, ratingCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - sqb := r.Performer() + withTxn(func(ctx context.Context) error { + sqb := sqlite.PerformerReaderWriter performerFilter := models.PerformerFilterType{ Rating: &ratingCriterion, } - performers := queryPerformers(t, sqb, &performerFilter, nil) + performers := queryPerformers(ctx, t, sqb, &performerFilter, nil) for _, performer := range performers { verifyInt64(t, performer.Rating, ratingCriterion) @@ -905,14 +907,14 @@ func verifyPerformersRating(t *testing.T, ratingCriterion models.IntCriterionInp } func TestPerformerQueryIsMissingRating(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Performer() + withTxn(func(ctx context.Context) error { + sqb := sqlite.PerformerReaderWriter isMissing := "rating" performerFilter := models.PerformerFilterType{ IsMissing: &isMissing, } - performers := queryPerformers(t, sqb, &performerFilter, nil) + performers := queryPerformers(ctx, t, sqb, &performerFilter, nil) assert.True(t, len(performers) > 0) @@ -925,14 +927,14 @@ func TestPerformerQueryIsMissingRating(t *testing.T) { } func TestPerformerQueryIsMissingImage(t *testing.T) { - withTxn(func(r models.Repository) error { + withTxn(func(ctx context.Context) error { isMissing := "image" performerFilter := &models.PerformerFilterType{ IsMissing: &isMissing, } // ensure query does not error - performers, _, err := r.Performer().Query(performerFilter, nil) + performers, _, err := sqlite.PerformerReaderWriter.Query(ctx, performerFilter, nil) if err != nil { t.Errorf("Error querying performers: %s", err.Error()) } @@ -940,7 +942,7 @@ func TestPerformerQueryIsMissingImage(t *testing.T) { assert.True(t, len(performers) > 0) for _, performer := range performers { - img, err := r.Performer().GetImage(performer.ID) + img, err := sqlite.PerformerReaderWriter.GetImage(ctx, performer.ID) if err != nil { t.Errorf("error getting performer image: %s", err.Error()) } 
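The hunks above all apply the same mechanical change: `withTxn` callbacks now receive a `context.Context` bound to the transaction instead of a `models.Repository`, stores are addressed as package-level values such as `sqlite.PerformerReaderWriter`, and every store method takes the context as its first argument. A minimal sketch of the resulting test shape (the test name is hypothetical; `withTxn` is assumed to be the helper from this package's test setup):

```go
package sqlite_test

import (
	"context"
	"testing"

	"github.com/stashapp/stash/pkg/models"
	"github.com/stashapp/stash/pkg/sqlite"
)

// Hypothetical example of the migrated pattern: the transaction travels in
// ctx, so the store itself can be a shared package-level value.
func TestPerformerQueryPattern(t *testing.T) {
	withTxn(func(ctx context.Context) error {
		sqb := sqlite.PerformerReaderWriter

		// every reader/writer method now takes ctx as its first argument
		performers, _, err := sqb.Query(ctx, &models.PerformerFilterType{}, nil)
		if err != nil {
			t.Errorf("Error querying performers: %s", err.Error())
		}

		_ = performers
		return nil
	})
}
```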
@@ -959,9 +961,9 @@ func TestPerformerQuerySortScenesCount(t *testing.T) { Direction: &direction, } - withTxn(func(r models.Repository) error { + withTxn(func(ctx context.Context) error { // just ensure it queries without error - performers, _, err := r.Performer().Query(nil, findFilter) + performers, _, err := sqlite.PerformerReaderWriter.Query(ctx, nil, findFilter) if err != nil { t.Errorf("Error querying performers: %s", err.Error()) } @@ -976,7 +978,7 @@ func TestPerformerQuerySortScenesCount(t *testing.T) { // sort in ascending order direction = models.SortDirectionEnumAsc - performers, _, err = r.Performer().Query(nil, findFilter) + performers, _, err = sqlite.PerformerReaderWriter.Query(ctx, nil, findFilter) if err != nil { t.Errorf("Error querying performers: %s", err.Error()) } diff --git a/pkg/sqlite/query.go b/pkg/sqlite/query.go index 27ce213b5..00b790955 100644 --- a/pkg/sqlite/query.go +++ b/pkg/sqlite/query.go @@ -1,10 +1,10 @@ package sqlite import ( + "context" "fmt" "strings" - "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" ) @@ -54,24 +54,23 @@ func (qb queryBuilder) toSQL(includeSortPagination bool) string { return body } -func (qb queryBuilder) findIDs() ([]int, error) { +func (qb queryBuilder) findIDs(ctx context.Context) ([]int, error) { const includeSortPagination = true sql := qb.toSQL(includeSortPagination) - logger.Tracef("SQL: %s, args: %v", sql, qb.args) - return qb.repository.runIdsQuery(sql, qb.args) + return qb.repository.runIdsQuery(ctx, sql, qb.args) } -func (qb queryBuilder) executeFind() ([]int, int, error) { +func (qb queryBuilder) executeFind(ctx context.Context) ([]int, int, error) { if qb.err != nil { return nil, 0, qb.err } body := qb.body() - return qb.repository.executeFindQuery(body, qb.args, qb.sortAndPagination, qb.whereClauses, qb.havingClauses, qb.withClauses, qb.recursiveWith) + return qb.repository.executeFindQuery(ctx, body, qb.args, qb.sortAndPagination, qb.whereClauses, qb.havingClauses, qb.withClauses, qb.recursiveWith) } -func (qb queryBuilder) executeCount() (int, error) { +func (qb queryBuilder) executeCount(ctx context.Context) (int, error) { if qb.err != nil { return 0, qb.err } @@ -89,7 +88,7 @@ func (qb queryBuilder) executeCount() (int, error) { body = qb.repository.buildQueryBody(body, qb.whereClauses, qb.havingClauses) countQuery := withClause + qb.repository.buildCountQuery(body) - return qb.repository.runCountQuery(countQuery, qb.args) + return qb.repository.runCountQuery(ctx, countQuery, qb.args) } func (qb *queryBuilder) addWhere(clauses ...string) { diff --git a/pkg/sqlite/record.go b/pkg/sqlite/record.go new file mode 100644 index 000000000..2214766c4 --- /dev/null +++ b/pkg/sqlite/record.go @@ -0,0 +1,112 @@ +package sqlite + +import ( + "github.com/doug-martin/goqu/v9/exp" + "github.com/stashapp/stash/pkg/models" + "gopkg.in/guregu/null.v4/zero" +) + +type updateRecord struct { + exp.Record +} + +func (r *updateRecord) set(destField string, v interface{}) { + r.Record[destField] = v +} + +// func (r *updateRecord) setString(destField string, v models.OptionalString) { +// if v.Set { +// if v.Null { +// panic("null value not allowed in optional string") +// } +// r.set(destField, v.Value) +// } +// } + +func (r *updateRecord) setNullString(destField string, v models.OptionalString) { + if v.Set { + r.set(destField, zero.StringFromPtr(v.Ptr())) + } +} + +func (r *updateRecord) setBool(destField string, v models.OptionalBool) { + if v.Set { + if v.Null { + panic("null value not allowed in 
optional bool") + } + r.set(destField, v.Value) + } +} + +func (r *updateRecord) setInt(destField string, v models.OptionalInt) { + if v.Set { + if v.Null { + panic("null value not allowed in optional int") + } + r.set(destField, v.Value) + } +} + +func (r *updateRecord) setNullInt(destField string, v models.OptionalInt) { + if v.Set { + r.set(destField, intFromPtr(v.Ptr())) + } +} + +// func (r *updateRecord) setInt64(destField string, v models.OptionalInt64) { +// if v.Set { +// if v.Null { +// panic("null value not allowed in optional int64") +// } +// r.set(destField, v.Value) +// } +// } + +// func (r *updateRecord) setNullInt64(destField string, v models.OptionalInt64) { +// if v.Set { +// r.set(destField, null.IntFromPtr(v.Ptr())) +// } +// } + +// func (r *updateRecord) setFloat64(destField string, v models.OptionalFloat64) { +// if v.Set { +// if v.Null { +// panic("null value not allowed in optional float64") +// } +// r.set(destField, v.Value) +// } +// } + +// func (r *updateRecord) setNullFloat64(destField string, v models.OptionalFloat64) { +// if v.Set { +// r.set(destField, null.FloatFromPtr(v.Ptr())) +// } +// } + +func (r *updateRecord) setTime(destField string, v models.OptionalTime) { + if v.Set { + if v.Null { + panic("null value not allowed in optional time") + } + r.set(destField, v.Value) + } +} + +// func (r *updateRecord) setNullTime(destField string, v models.OptionalTime) { +// if v.Set { +// r.set(destField, null.TimeFromPtr(v.Ptr())) +// } +// } + +func (r *updateRecord) setSQLiteDate(destField string, v models.OptionalDate) { + if v.Set { + if v.Null { + r.set(destField, models.SQLiteDate{}) + } else { + r.set(destField, models.SQLiteDate{ + String: v.Value.String(), + Valid: true, + }) + } + } +} diff --git a/pkg/database/regex.go b/pkg/sqlite/regex.go similarity index 98% rename from pkg/database/regex.go rename to pkg/sqlite/regex.go index dc7b5feb5..bbf713ae3 100644 --- a/pkg/database/regex.go +++ b/pkg/sqlite/regex.go @@ -1,4 +1,4 @@ -package database +package sqlite import ( "regexp" diff --git a/pkg/sqlite/repository.go b/pkg/sqlite/repository.go index f195d9c7e..437877ee6 100644 --- a/pkg/sqlite/repository.go +++ b/pkg/sqlite/repository.go @@ -1,6 +1,7 @@ package sqlite import ( + "context" "database/sql" "errors" "fmt" @@ -9,7 +10,7 @@ import ( "github.com/jmoiron/sqlx" - "github.com/stashapp/stash/pkg/logger" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/models" ) @@ -21,28 +22,28 @@ type objectList interface { } type repository struct { - tx dbi + tx dbWrapper tableName string idColumn string } -func (r *repository) get(id int, dest interface{}) error { +func (r *repository) getByID(ctx context.Context, id int, dest interface{}) error { stmt := fmt.Sprintf("SELECT * FROM %s WHERE %s = ?
LIMIT 1", r.tableName, r.idColumn) - return r.tx.Get(dest, stmt, id) + return r.tx.Get(ctx, dest, stmt, id) } -func (r *repository) getAll(id int, f func(rows *sqlx.Rows) error) error { +func (r *repository) getAll(ctx context.Context, id int, f func(rows *sqlx.Rows) error) error { stmt := fmt.Sprintf("SELECT * FROM %s WHERE %s = ?", r.tableName, r.idColumn) - return r.queryFunc(stmt, []interface{}{id}, false, f) + return r.queryFunc(ctx, stmt, []interface{}{id}, false, f) } -func (r *repository) insert(obj interface{}) (sql.Result, error) { +func (r *repository) insert(ctx context.Context, obj interface{}) (sql.Result, error) { stmt := fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", r.tableName, listKeys(obj, false), listKeys(obj, true)) - return r.tx.NamedExec(stmt, obj) + return r.tx.NamedExec(ctx, stmt, obj) } -func (r *repository) insertObject(obj interface{}, out interface{}) error { - result, err := r.insert(obj) +func (r *repository) insertObject(ctx context.Context, obj interface{}, out interface{}) error { + result, err := r.insert(ctx, obj) if err != nil { return err } @@ -50,11 +51,11 @@ func (r *repository) insertObject(obj interface{}, out interface{}) error { if err != nil { return err } - return r.get(int(id), out) + return r.getByID(ctx, int(id), out) } -func (r *repository) update(id int, obj interface{}, partial bool) error { - exists, err := r.exists(id) +func (r *repository) update(ctx context.Context, id int, obj interface{}, partial bool) error { + exists, err := r.exists(ctx, id) if err != nil { return err } @@ -64,30 +65,30 @@ func (r *repository) update(id int, obj interface{}, partial bool) error { } stmt := fmt.Sprintf("UPDATE %s SET %s WHERE %s.%s = :id", r.tableName, updateSet(obj, partial), r.tableName, r.idColumn) - _, err = r.tx.NamedExec(stmt, obj) + _, err = r.tx.NamedExec(ctx, stmt, obj) return err } -func (r *repository) updateMap(id int, m map[string]interface{}) error { - exists, err := r.exists(id) - if err != nil { - return err - } +// func (r *repository) updateMap(ctx context.Context, id int, m map[string]interface{}) error { +// exists, err := r.exists(ctx, id) +// if err != nil { +// return err +// } - if !exists { - return fmt.Errorf("%s %d does not exist in %s", r.idColumn, id, r.tableName) - } +// if !exists { +// return fmt.Errorf("%s %d does not exist in %s", r.idColumn, id, r.tableName) +// } - stmt := fmt.Sprintf("UPDATE %s SET %s WHERE %s.%s = :id", r.tableName, updateSetMap(m), r.tableName, r.idColumn) - _, err = r.tx.NamedExec(stmt, m) +// stmt := fmt.Sprintf("UPDATE %s SET %s WHERE %s.%s = :id", r.tableName, updateSetMap(m), r.tableName, r.idColumn) +// _, err = r.tx.NamedExec(ctx, stmt, m) - return err -} +// return err +// } -func (r *repository) destroyExisting(ids []int) error { +func (r *repository) destroyExisting(ctx context.Context, ids []int) error { for _, id := range ids { - exists, err := r.exists(id) + exists, err := r.exists(ctx, id) if err != nil { return err } @@ -97,13 +98,13 @@ func (r *repository) destroyExisting(ids []int) error { } } - return r.destroy(ids) + return r.destroy(ctx, ids) } -func (r *repository) destroy(ids []int) error { +func (r *repository) destroy(ctx context.Context, ids []int) error { for _, id := range ids { stmt := fmt.Sprintf("DELETE FROM %s WHERE %s = ?", r.tableName, r.idColumn) - if _, err := r.tx.Exec(stmt, id); err != nil { + if _, err := r.tx.Exec(ctx, stmt, id); err != nil { return err } } @@ -111,11 +112,11 @@ func (r *repository) destroy(ids []int) error { return nil } -func (r 
*repository) exists(id int) (bool, error) { +func (r *repository) exists(ctx context.Context, id int) (bool, error) { stmt := fmt.Sprintf("SELECT %s FROM %s WHERE %s = ? LIMIT 1", r.idColumn, r.tableName, r.idColumn) stmt = r.buildCountQuery(stmt) - c, err := r.runCountQuery(stmt, []interface{}{id}) + c, err := r.runCountQuery(ctx, stmt, []interface{}{id}) if err != nil { return false, err } @@ -127,26 +128,26 @@ func (r *repository) buildCountQuery(query string) string { return "SELECT COUNT(*) as count FROM (" + query + ") as temp" } -func (r *repository) runCountQuery(query string, args []interface{}) (int, error) { +func (r *repository) runCountQuery(ctx context.Context, query string, args []interface{}) (int, error) { result := struct { Int int `db:"count"` }{0} // Perform query and fetch result - if err := r.tx.Get(&result, query, args...); err != nil && !errors.Is(err, sql.ErrNoRows) { + if err := r.tx.Get(ctx, &result, query, args...); err != nil && !errors.Is(err, sql.ErrNoRows) { return 0, err } return result.Int, nil } -func (r *repository) runIdsQuery(query string, args []interface{}) ([]int, error) { +func (r *repository) runIdsQuery(ctx context.Context, query string, args []interface{}) ([]int, error) { var result []struct { Int int `db:"id"` } - if err := r.tx.Select(&result, query, args...); err != nil && !errors.Is(err, sql.ErrNoRows) { - return []int{}, err + if err := r.tx.Select(ctx, &result, query, args...); err != nil && !errors.Is(err, sql.ErrNoRows) { + return []int{}, fmt.Errorf("running query: %s [%v]: %w", query, args, err) } vsm := make([]int, len(result)) @@ -156,24 +157,8 @@ func (r *repository) runIdsQuery(query string, args []interface{}) ([]int, error return vsm, nil } -func (r *repository) runSumQuery(query string, args []interface{}) (float64, error) { - // Perform query and fetch result - result := struct { - Float64 float64 `db:"sum"` - }{0} - - // Perform query and fetch result - if err := r.tx.Get(&result, query, args...); err != nil && !errors.Is(err, sql.ErrNoRows) { - return 0, err - } - - return result.Float64, nil -} - -func (r *repository) queryFunc(query string, args []interface{}, single bool, f func(rows *sqlx.Rows) error) error { - logger.Tracef("SQL: %s, args: %v", query, args) - - rows, err := r.tx.Queryx(query, args...) +func (r *repository) queryFunc(ctx context.Context, query string, args []interface{}, single bool, f func(rows *sqlx.Rows) error) error { + rows, err := r.tx.Queryx(ctx, query, args...) 
if err != nil && !errors.Is(err, sql.ErrNoRows) { return err @@ -196,8 +181,8 @@ func (r *repository) queryFunc(query string, args []interface{}, single bool, f return nil } -func (r *repository) query(query string, args []interface{}, out objectList) error { - return r.queryFunc(query, args, false, func(rows *sqlx.Rows) error { +func (r *repository) query(ctx context.Context, query string, args []interface{}, out objectList) error { + return r.queryFunc(ctx, query, args, false, func(rows *sqlx.Rows) error { object := out.New() if err := rows.StructScan(object); err != nil { return err @@ -207,17 +192,21 @@ func (r *repository) query(query string, args []interface{}, out objectList) err }) } -func (r *repository) queryStruct(query string, args []interface{}, out interface{}) error { - return r.queryFunc(query, args, true, func(rows *sqlx.Rows) error { +func (r *repository) queryStruct(ctx context.Context, query string, args []interface{}, out interface{}) error { + if err := r.queryFunc(ctx, query, args, true, func(rows *sqlx.Rows) error { if err := rows.StructScan(out); err != nil { return err } return nil - }) + }); err != nil { + return fmt.Errorf("executing query: %s [%v]: %w", query, args, err) + } + + return nil } -func (r *repository) querySimple(query string, args []interface{}, out interface{}) error { - rows, err := r.tx.Queryx(query, args...) +func (r *repository) querySimple(ctx context.Context, query string, args []interface{}, out interface{}) error { + rows, err := r.tx.Queryx(ctx, query, args...) if err != nil && !errors.Is(err, sql.ErrNoRows) { return err @@ -249,7 +238,7 @@ func (r *repository) buildQueryBody(body string, whereClauses []string, havingCl return body } -func (r *repository) executeFindQuery(body string, args []interface{}, sortAndPagination string, whereClauses []string, havingClauses []string, withClauses []string, recursiveWith bool) ([]int, int, error) { +func (r *repository) executeFindQuery(ctx context.Context, body string, args []interface{}, sortAndPagination string, whereClauses []string, havingClauses []string, withClauses []string, recursiveWith bool) ([]int, int, error) { body = r.buildQueryBody(body, whereClauses, havingClauses) withClause := "" @@ -265,15 +254,13 @@ func (r *repository) executeFindQuery(body string, args []interface{}, sortAndPa idsQuery := withClause + body + sortAndPagination // Perform query and fetch result - logger.Tracef("SQL: %s, args: %v", idsQuery, args) - var countResult int var countErr error var idsResult []int var idsErr error - countResult, countErr = r.runCountQuery(countQuery, args) - idsResult, idsErr = r.runIdsQuery(idsQuery, args) + countResult, countErr = r.runCountQuery(ctx, countQuery, args) + idsResult, idsErr = r.runIdsQuery(ctx, idsQuery, args) if countErr != nil { return nil, 0, fmt.Errorf("error executing count query with SQL: %s, args: %v, error: %s", countQuery, args, countErr.Error()) @@ -318,23 +305,23 @@ type joinRepository struct { fkColumn string } -func (r *joinRepository) getIDs(id int) ([]int, error) { +func (r *joinRepository) getIDs(ctx context.Context, id int) ([]int, error) { query := fmt.Sprintf(`SELECT %s as id from %s WHERE %s = ?`, r.fkColumn, r.tableName, r.idColumn) - return r.runIdsQuery(query, []interface{}{id}) + return r.runIdsQuery(ctx, query, []interface{}{id}) } -func (r *joinRepository) insert(id, foreignID int) (sql.Result, error) { +func (r *joinRepository) insert(ctx context.Context, id, foreignID int) (sql.Result, error) { stmt := fmt.Sprintf("INSERT INTO %s (%s, %s) 
VALUES (?, ?)", r.tableName, r.idColumn, r.fkColumn) - return r.tx.Exec(stmt, id, foreignID) + return r.tx.Exec(ctx, stmt, id, foreignID) } -func (r *joinRepository) replace(id int, foreignIDs []int) error { - if err := r.destroy([]int{id}); err != nil { +func (r *joinRepository) replace(ctx context.Context, id int, foreignIDs []int) error { + if err := r.destroy(ctx, []int{id}); err != nil { return err } for _, fk := range foreignIDs { - if _, err := r.insert(id, fk); err != nil { + if _, err := r.insert(ctx, id, fk); err != nil { return err } } @@ -347,20 +334,20 @@ type imageRepository struct { imageColumn string } -func (r *imageRepository) get(id int) ([]byte, error) { +func (r *imageRepository) get(ctx context.Context, id int) ([]byte, error) { query := fmt.Sprintf("SELECT %s from %s WHERE %s = ?", r.imageColumn, r.tableName, r.idColumn) var ret []byte - err := r.querySimple(query, []interface{}{id}, &ret) + err := r.querySimple(ctx, query, []interface{}{id}, &ret) return ret, err } -func (r *imageRepository) replace(id int, image []byte) error { - if err := r.destroy([]int{id}); err != nil { +func (r *imageRepository) replace(ctx context.Context, id int, image []byte) error { + if err := r.destroy(ctx, []int{id}); err != nil { return err } stmt := fmt.Sprintf("INSERT INTO %s (%s, %s) VALUES (?, ?)", r.tableName, r.idColumn, r.imageColumn) - _, err := r.tx.Exec(stmt, id, image) + _, err := r.tx.Exec(ctx, stmt, id, image) return err } @@ -369,10 +356,10 @@ type captionRepository struct { repository } -func (r *captionRepository) get(id int) ([]*models.SceneCaption, error) { - query := fmt.Sprintf("SELECT %s, %s, %s from %s WHERE %s = ?", sceneCaptionCodeColumn, sceneCaptionFilenameColumn, sceneCaptionTypeColumn, r.tableName, r.idColumn) - var ret []*models.SceneCaption - err := r.queryFunc(query, []interface{}{id}, false, func(rows *sqlx.Rows) error { +func (r *captionRepository) get(ctx context.Context, id file.ID) ([]*models.VideoCaption, error) { + query := fmt.Sprintf("SELECT %s, %s, %s from %s WHERE %s = ?", captionCodeColumn, captionFilenameColumn, captionTypeColumn, r.tableName, r.idColumn) + var ret []*models.VideoCaption + err := r.queryFunc(ctx, query, []interface{}{id}, false, func(rows *sqlx.Rows) error { var captionCode string var captionFilename string var captionType string @@ -381,7 +368,7 @@ func (r *captionRepository) get(id int) ([]*models.SceneCaption, error) { return err } - caption := &models.SceneCaption{ + caption := &models.VideoCaption{ LanguageCode: captionCode, Filename: captionFilename, CaptionType: captionType, @@ -392,18 +379,18 @@ func (r *captionRepository) get(id int) ([]*models.SceneCaption, error) { return ret, err } -func (r *captionRepository) insert(id int, caption *models.SceneCaption) (sql.Result, error) { - stmt := fmt.Sprintf("INSERT INTO %s (%s, %s, %s, %s) VALUES (?, ?, ?, ?)", r.tableName, r.idColumn, sceneCaptionCodeColumn, sceneCaptionFilenameColumn, sceneCaptionTypeColumn) - return r.tx.Exec(stmt, id, caption.LanguageCode, caption.Filename, caption.CaptionType) +func (r *captionRepository) insert(ctx context.Context, id file.ID, caption *models.VideoCaption) (sql.Result, error) { + stmt := fmt.Sprintf("INSERT INTO %s (%s, %s, %s, %s) VALUES (?, ?, ?, ?)", r.tableName, r.idColumn, captionCodeColumn, captionFilenameColumn, captionTypeColumn) + return r.tx.Exec(ctx, stmt, id, caption.LanguageCode, caption.Filename, caption.CaptionType) } -func (r *captionRepository) replace(id int, captions []*models.SceneCaption) error { - if err := 
r.destroy([]int{id}); err != nil { +func (r *captionRepository) replace(ctx context.Context, id file.ID, captions []*models.VideoCaption) error { + if err := r.destroy(ctx, []int{int(id)}); err != nil { return err } for _, caption := range captions { - if _, err := r.insert(id, caption); err != nil { + if _, err := r.insert(ctx, id, caption); err != nil { return err } } @@ -416,10 +403,10 @@ type stringRepository struct { stringColumn string } -func (r *stringRepository) get(id int) ([]string, error) { +func (r *stringRepository) get(ctx context.Context, id int) ([]string, error) { query := fmt.Sprintf("SELECT %s from %s WHERE %s = ?", r.stringColumn, r.tableName, r.idColumn) var ret []string - err := r.queryFunc(query, []interface{}{id}, false, func(rows *sqlx.Rows) error { + err := r.queryFunc(ctx, query, []interface{}{id}, false, func(rows *sqlx.Rows) error { var out string if err := rows.Scan(&out); err != nil { return err @@ -431,18 +418,18 @@ func (r *stringRepository) get(id int) ([]string, error) { return ret, err } -func (r *stringRepository) insert(id int, s string) (sql.Result, error) { +func (r *stringRepository) insert(ctx context.Context, id int, s string) (sql.Result, error) { stmt := fmt.Sprintf("INSERT INTO %s (%s, %s) VALUES (?, ?)", r.tableName, r.idColumn, r.stringColumn) - return r.tx.Exec(stmt, id, s) + return r.tx.Exec(ctx, stmt, id, s) } -func (r *stringRepository) replace(id int, newStrings []string) error { - if err := r.destroy([]int{id}); err != nil { +func (r *stringRepository) replace(ctx context.Context, id int, newStrings []string) error { + if err := r.destroy(ctx, []int{id}); err != nil { return err } for _, s := range newStrings { - if _, err := r.insert(id, s); err != nil { + if _, err := r.insert(ctx, id, s); err != nil { return err } } @@ -454,31 +441,31 @@ type stashIDRepository struct { repository } -type stashIDs []*models.StashID +type stashIDs []models.StashID func (s *stashIDs) Append(o interface{}) { - *s = append(*s, o.(*models.StashID)) + *s = append(*s, *o.(*models.StashID)) } func (s *stashIDs) New() interface{} { return &models.StashID{} } -func (r *stashIDRepository) get(id int) ([]*models.StashID, error) { +func (r *stashIDRepository) get(ctx context.Context, id int) ([]models.StashID, error) { query := fmt.Sprintf("SELECT stash_id, endpoint from %s WHERE %s = ?", r.tableName, r.idColumn) var ret stashIDs - err := r.query(query, []interface{}{id}, &ret) - return []*models.StashID(ret), err + err := r.query(ctx, query, []interface{}{id}, &ret) + return []models.StashID(ret), err } -func (r *stashIDRepository) replace(id int, newIDs []models.StashID) error { - if err := r.destroy([]int{id}); err != nil { +func (r *stashIDRepository) replace(ctx context.Context, id int, newIDs []models.StashID) error { + if err := r.destroy(ctx, []int{id}); err != nil { return err } query := fmt.Sprintf("INSERT INTO %s (%s, endpoint, stash_id) VALUES (?, ?, ?)", r.tableName, r.idColumn) for _, stashID := range newIDs { - _, err := r.tx.Exec(query, id, stashID.Endpoint, stashID.StashID) + _, err := r.tx.Exec(ctx, query, id, stashID.Endpoint, stashID.StashID) if err != nil { return err } @@ -486,6 +473,96 @@ func (r *stashIDRepository) replace(id int, newIDs []models.StashID) error { return nil } +type filesRepository struct { + repository +} + +type relatedFileRow struct { + ID int `db:"id"` + FileID file.ID `db:"file_id"` + Primary bool `db:"primary"` +} + +func (r *filesRepository) getMany(ctx context.Context, ids []int, primaryOnly bool) ([][]file.ID, 
error) { + var primaryClause string + if primaryOnly { + primaryClause = " AND `primary` = 1" + } + + query := fmt.Sprintf("SELECT %s as id, file_id, `primary` from %s WHERE %[1]s IN %[3]s%s", r.idColumn, r.tableName, getInBinding(len(ids)), primaryClause) + + idi := make([]interface{}, len(ids)) + for i, id := range ids { + idi[i] = id + } + + var fileRows []relatedFileRow + if err := r.queryFunc(ctx, query, idi, false, func(rows *sqlx.Rows) error { + var f relatedFileRow + + if err := rows.StructScan(&f); err != nil { + return err + } + + fileRows = append(fileRows, f) + + return nil + }); err != nil { + return nil, err + } + + ret := make([][]file.ID, len(ids)) + idToIndex := make(map[int]int) + for i, id := range ids { + idToIndex[id] = i + } + + for _, row := range fileRows { + id := row.ID + fileID := row.FileID + + if row.Primary { + // prepend to list + ret[idToIndex[id]] = append([]file.ID{fileID}, ret[idToIndex[id]]...) + } else { + ret[idToIndex[id]] = append(ret[idToIndex[id]], row.FileID) + } + } + + return ret, nil +} + +func (r *filesRepository) get(ctx context.Context, id int) ([]file.ID, error) { + query := fmt.Sprintf("SELECT file_id, `primary` from %s WHERE %s = ?", r.tableName, r.idColumn) + + type relatedFile struct { + FileID file.ID `db:"file_id"` + Primary bool `db:"primary"` + } + + var ret []file.ID + if err := r.queryFunc(ctx, query, []interface{}{id}, false, func(rows *sqlx.Rows) error { + var f relatedFile + + if err := rows.StructScan(&f); err != nil { + return err + } + + if f.Primary { + // prepend to list + ret = append([]file.ID{f.FileID}, ret...) + } else { + ret = append(ret, f.FileID) + } + + return nil + }); err != nil { + return nil, err + } + + return ret, nil +} + func listKeys(i interface{}, addPrefix bool) string { var query []string v := reflect.ValueOf(i) @@ -528,10 +605,10 @@ func updateSet(i interface{}, partial bool) string { return strings.Join(query, ", ") } -func updateSetMap(m map[string]interface{}) string { - var query []string - for k := range m { - query = append(query, fmt.Sprintf("%s=:%s", k, k)) - } - return strings.Join(query, ", ") -} +// func updateSetMap(m map[string]interface{}) string { +// var query []string +// for k := range m { +// query = append(query, fmt.Sprintf("%s=:%s", k, k)) +// } +// return strings.Join(query, ", ") +// } diff --git a/pkg/sqlite/saved_filter.go b/pkg/sqlite/saved_filter.go index 6c507bee3..54fddcbc8 100644 --- a/pkg/sqlite/saved_filter.go +++ b/pkg/sqlite/saved_filter.go @@ -1,6 +1,7 @@ package sqlite import ( + "context" "database/sql" "errors" "fmt" @@ -15,42 +16,39 @@ type savedFilterQueryBuilder struct { repository } -func NewSavedFilterReaderWriter(tx dbi) *savedFilterQueryBuilder { - return &savedFilterQueryBuilder{ - repository{ - tx: tx, - tableName: savedFilterTable, - idColumn: idColumn, - }, - } +var SavedFilterReaderWriter = &savedFilterQueryBuilder{ + repository{ + tableName: savedFilterTable, + idColumn: idColumn, + }, } -func (qb *savedFilterQueryBuilder) Create(newObject models.SavedFilter) (*models.SavedFilter, error) { +func (qb *savedFilterQueryBuilder) Create(ctx context.Context, newObject models.SavedFilter) (*models.SavedFilter, error) { var ret models.SavedFilter - if err := qb.insertObject(newObject, &ret); err != nil { + if err := qb.insertObject(ctx, newObject, &ret); err != nil { return nil, err } return &ret, nil } -func (qb *savedFilterQueryBuilder) Update(updatedObject models.SavedFilter) (*models.SavedFilter, error) { +func (qb *savedFilterQueryBuilder) Update(ctx 
context.Context, updatedObject models.SavedFilter) (*models.SavedFilter, error) { const partial = false - if err := qb.update(updatedObject.ID, updatedObject, partial); err != nil { + if err := qb.update(ctx, updatedObject.ID, updatedObject, partial); err != nil { return nil, err } var ret models.SavedFilter - if err := qb.get(updatedObject.ID, &ret); err != nil { + if err := qb.getByID(ctx, updatedObject.ID, &ret); err != nil { return nil, err } return &ret, nil } -func (qb *savedFilterQueryBuilder) SetDefault(obj models.SavedFilter) (*models.SavedFilter, error) { +func (qb *savedFilterQueryBuilder) SetDefault(ctx context.Context, obj models.SavedFilter) (*models.SavedFilter, error) { // find the existing default - existing, err := qb.FindDefault(obj.Mode) + existing, err := qb.FindDefault(ctx, obj.Mode) if err != nil { return nil, err @@ -60,19 +58,19 @@ func (qb *savedFilterQueryBuilder) SetDefault(obj models.SavedFilter) (*models.S if existing != nil { obj.ID = existing.ID - return qb.Update(obj) + return qb.Update(ctx, obj) } - return qb.Create(obj) + return qb.Create(ctx, obj) } -func (qb *savedFilterQueryBuilder) Destroy(id int) error { - return qb.destroyExisting([]int{id}) +func (qb *savedFilterQueryBuilder) Destroy(ctx context.Context, id int) error { + return qb.destroyExisting(ctx, []int{id}) } -func (qb *savedFilterQueryBuilder) Find(id int) (*models.SavedFilter, error) { +func (qb *savedFilterQueryBuilder) Find(ctx context.Context, id int) (*models.SavedFilter, error) { var ret models.SavedFilter - if err := qb.get(id, &ret); err != nil { + if err := qb.getByID(ctx, id, &ret); err != nil { if errors.Is(err, sql.ErrNoRows) { return nil, nil } @@ -81,10 +79,10 @@ func (qb *savedFilterQueryBuilder) Find(id int) (*models.SavedFilter, error) { return &ret, nil } -func (qb *savedFilterQueryBuilder) FindMany(ids []int, ignoreNotFound bool) ([]*models.SavedFilter, error) { +func (qb *savedFilterQueryBuilder) FindMany(ctx context.Context, ids []int, ignoreNotFound bool) ([]*models.SavedFilter, error) { var filters []*models.SavedFilter for _, id := range ids { - filter, err := qb.Find(id) + filter, err := qb.Find(ctx, id) if err != nil { return nil, err } @@ -99,24 +97,24 @@ func (qb *savedFilterQueryBuilder) FindMany(ids []int, ignoreNotFound bool) ([]* return filters, nil } -func (qb *savedFilterQueryBuilder) FindByMode(mode models.FilterMode) ([]*models.SavedFilter, error) { +func (qb *savedFilterQueryBuilder) FindByMode(ctx context.Context, mode models.FilterMode) ([]*models.SavedFilter, error) { // exclude empty-named filters - these are the internal default filters query := fmt.Sprintf(`SELECT * FROM %s WHERE mode = ? AND name != ?`, savedFilterTable) var ret models.SavedFilters - if err := qb.query(query, []interface{}{mode, savedFilterDefaultName}, &ret); err != nil { + if err := qb.query(ctx, query, []interface{}{mode, savedFilterDefaultName}, &ret); err != nil { return nil, err } return []*models.SavedFilter(ret), nil } -func (qb *savedFilterQueryBuilder) FindDefault(mode models.FilterMode) (*models.SavedFilter, error) { +func (qb *savedFilterQueryBuilder) FindDefault(ctx context.Context, mode models.FilterMode) (*models.SavedFilter, error) { query := fmt.Sprintf(`SELECT * FROM %s WHERE mode = ? 
AND name = ?`, savedFilterTable) var ret models.SavedFilters - if err := qb.query(query, []interface{}{mode, savedFilterDefaultName}, &ret); err != nil { + if err := qb.query(ctx, query, []interface{}{mode, savedFilterDefaultName}, &ret); err != nil { return nil, err } @@ -127,9 +125,9 @@ func (qb *savedFilterQueryBuilder) FindDefault(mode models.FilterMode) (*models. return nil, nil } -func (qb *savedFilterQueryBuilder) All() ([]*models.SavedFilter, error) { +func (qb *savedFilterQueryBuilder) All(ctx context.Context) ([]*models.SavedFilter, error) { var ret models.SavedFilters - if err := qb.query(selectAll(savedFilterTable), nil, &ret); err != nil { + if err := qb.query(ctx, selectAll(savedFilterTable), nil, &ret); err != nil { return nil, err } diff --git a/pkg/sqlite/saved_filter_test.go b/pkg/sqlite/saved_filter_test.go index 5ec049290..c22b374fb 100644 --- a/pkg/sqlite/saved_filter_test.go +++ b/pkg/sqlite/saved_filter_test.go @@ -4,15 +4,17 @@ package sqlite_test import ( + "context" "testing" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sqlite" "github.com/stretchr/testify/assert" ) func TestSavedFilterFind(t *testing.T) { - withTxn(func(r models.Repository) error { - savedFilter, err := r.SavedFilter().Find(savedFilterIDs[savedFilterIdxImage]) + withTxn(func(ctx context.Context) error { + savedFilter, err := sqlite.SavedFilterReaderWriter.Find(ctx, savedFilterIDs[savedFilterIdxImage]) if err != nil { t.Errorf("Error finding saved filter: %s", err.Error()) @@ -25,8 +27,8 @@ func TestSavedFilterFind(t *testing.T) { } func TestSavedFilterFindByMode(t *testing.T) { - withTxn(func(r models.Repository) error { - savedFilters, err := r.SavedFilter().FindByMode(models.FilterModeScenes) + withTxn(func(ctx context.Context) error { + savedFilters, err := sqlite.SavedFilterReaderWriter.FindByMode(ctx, models.FilterModeScenes) if err != nil { t.Errorf("Error finding saved filters: %s", err.Error()) @@ -45,8 +47,8 @@ func TestSavedFilterDestroy(t *testing.T) { var id int // create the saved filter to destroy - withTxn(func(r models.Repository) error { - created, err := r.SavedFilter().Create(models.SavedFilter{ + withTxn(func(ctx context.Context) error { + created, err := sqlite.SavedFilterReaderWriter.Create(ctx, models.SavedFilter{ Name: filterName, Mode: models.FilterModeScenes, Filter: testFilter, @@ -59,15 +61,15 @@ func TestSavedFilterDestroy(t *testing.T) { return err }) - withTxn(func(r models.Repository) error { - qb := r.SavedFilter() + withTxn(func(ctx context.Context) error { + qb := sqlite.SavedFilterReaderWriter - return qb.Destroy(id) + return qb.Destroy(ctx, id) }) // now try to find it - withTxn(func(r models.Repository) error { - found, err := r.SavedFilter().Find(id) + withTxn(func(ctx context.Context) error { + found, err := sqlite.SavedFilterReaderWriter.Find(ctx, id) if err == nil { assert.Nil(t, found) } @@ -77,8 +79,8 @@ func TestSavedFilterDestroy(t *testing.T) { } func TestSavedFilterFindDefault(t *testing.T) { - withTxn(func(r models.Repository) error { - def, err := r.SavedFilter().FindDefault(models.FilterModeScenes) + withTxn(func(ctx context.Context) error { + def, err := sqlite.SavedFilterReaderWriter.FindDefault(ctx, models.FilterModeScenes) if err == nil { assert.Equal(t, savedFilterIDs[savedFilterIdxDefaultScene], def.ID) } @@ -90,8 +92,8 @@ func TestSavedFilterFindDefault(t *testing.T) { func TestSavedFilterSetDefault(t *testing.T) { const newFilter = "foo" - withTxn(func(r models.Repository) error { - _, err := 
r.SavedFilter().SetDefault(models.SavedFilter{ + withTxn(func(ctx context.Context) error { + _, err := sqlite.SavedFilterReaderWriter.SetDefault(ctx, models.SavedFilter{ Mode: models.FilterModeMovies, Filter: newFilter, }) @@ -100,8 +102,8 @@ func TestSavedFilterSetDefault(t *testing.T) { }) var defID int - withTxn(func(r models.Repository) error { - def, err := r.SavedFilter().FindDefault(models.FilterModeMovies) + withTxn(func(ctx context.Context) error { + def, err := sqlite.SavedFilterReaderWriter.FindDefault(ctx, models.FilterModeMovies) if err == nil { defID = def.ID assert.Equal(t, newFilter, def.Filter) @@ -111,8 +113,8 @@ func TestSavedFilterSetDefault(t *testing.T) { }) // destroy it again - withTxn(func(r models.Repository) error { - return r.SavedFilter().Destroy(defID) + withTxn(func(ctx context.Context) error { + return sqlite.SavedFilterReaderWriter.Destroy(ctx, defID) }) } diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index cb5085dfd..f096db130 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -1,346 +1,753 @@ package sqlite import ( + "context" "database/sql" "errors" "fmt" + "path/filepath" "strconv" "strings" + "time" + "github.com/doug-martin/goqu/v9" + "github.com/doug-martin/goqu/v9/exp" "github.com/jmoiron/sqlx" + "gopkg.in/guregu/null.v4" + "gopkg.in/guregu/null.v4/zero" + + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sliceutil/intslice" "github.com/stashapp/stash/pkg/utils" ) -const sceneTable = "scenes" -const sceneIDColumn = "scene_id" -const performersScenesTable = "performers_scenes" -const scenesTagsTable = "scenes_tags" -const scenesGalleriesTable = "scenes_galleries" -const moviesScenesTable = "movies_scenes" - -const sceneCaptionsTable = "scene_captions" -const sceneCaptionCodeColumn = "language_code" -const sceneCaptionFilenameColumn = "filename" -const sceneCaptionTypeColumn = "caption_type" - -var scenesForPerformerQuery = selectAll(sceneTable) + ` -LEFT JOIN performers_scenes as performers_join on performers_join.scene_id = scenes.id -WHERE performers_join.performer_id = ? -GROUP BY scenes.id -` - -var countScenesForPerformerQuery = ` -SELECT performer_id FROM performers_scenes as performers_join -WHERE performer_id = ? -GROUP BY scene_id -` - -var scenesForStudioQuery = selectAll(sceneTable) + ` -JOIN studios ON studios.id = scenes.studio_id -WHERE studios.id = ? -GROUP BY scenes.id -` -var scenesForMovieQuery = selectAll(sceneTable) + ` -LEFT JOIN movies_scenes as movies_join on movies_join.scene_id = scenes.id -WHERE movies_join.movie_id = ? -GROUP BY scenes.id -` - -var countScenesForTagQuery = ` -SELECT tag_id AS id FROM scenes_tags -WHERE scenes_tags.tag_id = ? -GROUP BY scenes_tags.scene_id -` - -var scenesForGalleryQuery = selectAll(sceneTable) + ` -LEFT JOIN scenes_galleries as galleries_join on galleries_join.scene_id = scenes.id -WHERE galleries_join.gallery_id = ? 
-GROUP BY scenes.id -` - -var countScenesForMissingChecksumQuery = ` -SELECT id FROM scenes -WHERE scenes.checksum is null -` - -var countScenesForMissingOSHashQuery = ` -SELECT id FROM scenes -WHERE scenes.oshash is null -` +const ( + sceneTable = "scenes" + scenesFilesTable = "scenes_files" + sceneIDColumn = "scene_id" + performersScenesTable = "performers_scenes" + scenesTagsTable = "scenes_tags" + scenesGalleriesTable = "scenes_galleries" + moviesScenesTable = "movies_scenes" +) var findExactDuplicateQuery = ` -SELECT GROUP_CONCAT(id) as ids +SELECT GROUP_CONCAT(scenes.id) as ids FROM scenes -WHERE phash IS NOT NULL -GROUP BY phash -HAVING COUNT(phash) > 1 -ORDER BY SUM(size) DESC; +INNER JOIN scenes_files ON (scenes.id = scenes_files.scene_id) +INNER JOIN files ON (scenes_files.file_id = files.id) +INNER JOIN files_fingerprints ON (scenes_files.file_id = files_fingerprints.file_id AND files_fingerprints.type = 'phash') +GROUP BY files_fingerprints.fingerprint +HAVING COUNT(files_fingerprints.fingerprint) > 1 AND COUNT(DISTINCT scenes.id) > 1 +ORDER BY SUM(files.size) DESC; ` var findAllPhashesQuery = ` -SELECT id, phash +SELECT scenes.id as id, files_fingerprints.fingerprint as phash FROM scenes -WHERE phash IS NOT NULL -ORDER BY size DESC +INNER JOIN scenes_files ON (scenes.id = scenes_files.scene_id) +INNER JOIN files ON (scenes_files.file_id = files.id) +INNER JOIN files_fingerprints ON (scenes_files.file_id = files_fingerprints.file_id AND files_fingerprints.type = 'phash') +ORDER BY files.size DESC ` -type sceneQueryBuilder struct { - repository +type sceneRow struct { + ID int `db:"id" goqu:"skipinsert"` + Title zero.String `db:"title"` + Details zero.String `db:"details"` + URL zero.String `db:"url"` + Date models.SQLiteDate `db:"date"` + Rating null.Int `db:"rating"` + Organized bool `db:"organized"` + OCounter int `db:"o_counter"` + StudioID null.Int `db:"studio_id,omitempty"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` } -func NewSceneReaderWriter(tx dbi) *sceneQueryBuilder { - return &sceneQueryBuilder{ - repository{ - tx: tx, +func (r *sceneRow) fromScene(o models.Scene) { + r.ID = o.ID + r.Title = zero.StringFrom(o.Title) + r.Details = zero.StringFrom(o.Details) + r.URL = zero.StringFrom(o.URL) + if o.Date != nil { + _ = r.Date.Scan(o.Date.Time) + } + r.Rating = intFromPtr(o.Rating) + r.Organized = o.Organized + r.OCounter = o.OCounter + r.StudioID = intFromPtr(o.StudioID) + r.CreatedAt = o.CreatedAt + r.UpdatedAt = o.UpdatedAt +} + +type sceneQueryRow struct { + sceneRow + PrimaryFileID null.Int `db:"primary_file_id"` + PrimaryFileFolderPath zero.String `db:"primary_file_folder_path"` + PrimaryFileBasename zero.String `db:"primary_file_basename"` + PrimaryFileOshash zero.String `db:"primary_file_oshash"` + PrimaryFileChecksum zero.String `db:"primary_file_checksum"` +} + +func (r *sceneQueryRow) resolve() *models.Scene { + ret := &models.Scene{ + ID: r.ID, + Title: r.Title.String, + Details: r.Details.String, + URL: r.URL.String, + Date: r.Date.DatePtr(), + Rating: nullIntPtr(r.Rating), + Organized: r.Organized, + OCounter: r.OCounter, + StudioID: nullIntPtr(r.StudioID), + + PrimaryFileID: nullIntFileIDPtr(r.PrimaryFileID), + OSHash: r.PrimaryFileOshash.String, + Checksum: r.PrimaryFileChecksum.String, + + CreatedAt: r.CreatedAt, + UpdatedAt: r.UpdatedAt, + } + + if r.PrimaryFileFolderPath.Valid && r.PrimaryFileBasename.Valid { + ret.Path = filepath.Join(r.PrimaryFileFolderPath.String, r.PrimaryFileBasename.String) + } + + return ret 
+} + +type sceneRowRecord struct { + updateRecord +} + +func (r *sceneRowRecord) fromPartial(o models.ScenePartial) { + r.setNullString("title", o.Title) + r.setNullString("details", o.Details) + r.setNullString("url", o.URL) + r.setSQLiteDate("date", o.Date) + r.setNullInt("rating", o.Rating) + r.setBool("organized", o.Organized) + r.setInt("o_counter", o.OCounter) + r.setNullInt("studio_id", o.StudioID) + r.setTime("created_at", o.CreatedAt) + r.setTime("updated_at", o.UpdatedAt) +} + +type SceneStore struct { + repository + + tableMgr *table + oCounterManager + + fileStore *FileStore +} + +func NewSceneStore(fileStore *FileStore) *SceneStore { + return &SceneStore{ + repository: repository{ tableName: sceneTable, idColumn: idColumn, }, + + tableMgr: sceneTableMgr, + oCounterManager: oCounterManager{sceneTableMgr}, + fileStore: fileStore, } } -func (qb *sceneQueryBuilder) Create(newObject models.Scene) (*models.Scene, error) { - var ret models.Scene - if err := qb.insertObject(newObject, &ret); err != nil { - return nil, err +func (qb *SceneStore) table() exp.IdentifierExpression { + return qb.tableMgr.table +} + +func (qb *SceneStore) Create(ctx context.Context, newObject *models.Scene, fileIDs []file.ID) error { + var r sceneRow + r.fromScene(*newObject) + + id, err := qb.tableMgr.insertID(ctx, r) + if err != nil { + return err } - return &ret, nil -} - -func (qb *sceneQueryBuilder) Update(updatedObject models.ScenePartial) (*models.Scene, error) { - const partial = true - if err := qb.update(updatedObject.ID, updatedObject, partial); err != nil { - return nil, err + if len(fileIDs) > 0 { + const firstPrimary = true + if err := scenesFilesTableMgr.insertJoins(ctx, id, firstPrimary, fileIDs); err != nil { + return err + } } - return qb.find(updatedObject.ID) -} - -func (qb *sceneQueryBuilder) UpdateFull(updatedObject models.Scene) (*models.Scene, error) { - const partial = false - if err := qb.update(updatedObject.ID, updatedObject, partial); err != nil { - return nil, err + if newObject.PerformerIDs.Loaded() { + if err := scenesPerformersTableMgr.insertJoins(ctx, id, newObject.PerformerIDs.List()); err != nil { + return err + } + } + if newObject.TagIDs.Loaded() { + if err := scenesTagsTableMgr.insertJoins(ctx, id, newObject.TagIDs.List()); err != nil { + return err + } } - return qb.find(updatedObject.ID) + if newObject.GalleryIDs.Loaded() { + if err := scenesGalleriesTableMgr.insertJoins(ctx, id, newObject.GalleryIDs.List()); err != nil { + return err + } + } + + if newObject.StashIDs.Loaded() { + if err := scenesStashIDsTableMgr.insertJoins(ctx, id, newObject.StashIDs.List()); err != nil { + return err + } + } + + if newObject.Movies.Loaded() { + if err := scenesMoviesTableMgr.insertJoins(ctx, id, newObject.Movies.List()); err != nil { + return err + } + } + + updated, err := qb.find(ctx, id) + if err != nil { + return fmt.Errorf("finding after create: %w", err) + } + + *newObject = *updated + + return nil } -func (qb *sceneQueryBuilder) UpdateFileModTime(id int, modTime models.NullSQLiteTimestamp) error { - return qb.updateMap(id, map[string]interface{}{ - "file_mod_time": modTime, - }) -} - -func (qb *sceneQueryBuilder) captionRepository() *captionRepository { - return &captionRepository{ - repository: repository{ - tx: qb.tx, - tableName: sceneCaptionsTable, - idColumn: sceneIDColumn, +func (qb *SceneStore) UpdatePartial(ctx context.Context, id int, partial models.ScenePartial) (*models.Scene, error) { + r := sceneRowRecord{ + updateRecord{ + Record: make(exp.Record), }, } -} 
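`sceneRowRecord.fromPartial` above is what makes the new `UpdatePartial` flow cheap: each `set*` helper copies a field into the goqu `exp.Record` only when its `Optional*` wrapper is marked set, so the generated UPDATE touches exactly the columns the caller populated. A sketch of a caller, assuming the `models.NewScenePartial` and `models.NewOptionalString` constructors from this branch and a ctx already bound to a transaction:

```go
// Hypothetical helper: rename a scene without touching any other column.
func renameScene(ctx context.Context, store *sqlite.SceneStore, id int, title string) (*models.Scene, error) {
	partial := models.NewScenePartial() // marks UpdatedAt; everything else stays unset
	partial.Title = models.NewOptionalString(title)

	// fromPartial turns the two set fields into an exp.Record, and
	// UpdatePartial issues a single UPDATE for just those columns.
	return store.UpdatePartial(ctx, id, partial)
}
```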
-func (qb *sceneQueryBuilder) GetCaptions(sceneID int) ([]*models.SceneCaption, error) { - return qb.captionRepository().get(sceneID) -} + r.fromPartial(partial) -func (qb *sceneQueryBuilder) UpdateCaptions(sceneID int, captions []*models.SceneCaption) error { - return qb.captionRepository().replace(sceneID, captions) - -} - -func (qb *sceneQueryBuilder) IncrementOCounter(id int) (int, error) { - _, err := qb.tx.Exec( - `UPDATE scenes SET o_counter = o_counter + 1 WHERE scenes.id = ?`, - id, - ) - if err != nil { - return 0, err + if len(r.Record) > 0 { + if err := qb.tableMgr.updateByID(ctx, id, r.Record); err != nil { + return nil, err + } } - scene, err := qb.find(id) - if err != nil { - return 0, err + if partial.PerformerIDs != nil { + if err := scenesPerformersTableMgr.modifyJoins(ctx, id, partial.PerformerIDs.IDs, partial.PerformerIDs.Mode); err != nil { + return nil, err + } + } + if partial.TagIDs != nil { + if err := scenesTagsTableMgr.modifyJoins(ctx, id, partial.TagIDs.IDs, partial.TagIDs.Mode); err != nil { + return nil, err + } + } + if partial.GalleryIDs != nil { + if err := scenesGalleriesTableMgr.modifyJoins(ctx, id, partial.GalleryIDs.IDs, partial.GalleryIDs.Mode); err != nil { + return nil, err + } + } + if partial.StashIDs != nil { + if err := scenesStashIDsTableMgr.modifyJoins(ctx, id, partial.StashIDs.StashIDs, partial.StashIDs.Mode); err != nil { + return nil, err + } + } + if partial.MovieIDs != nil { + if err := scenesMoviesTableMgr.modifyJoins(ctx, id, partial.MovieIDs.Movies, partial.MovieIDs.Mode); err != nil { + return nil, err + } + } + if partial.PrimaryFileID != nil { + if err := scenesFilesTableMgr.setPrimary(ctx, id, *partial.PrimaryFileID); err != nil { + return nil, err + } } - return scene.OCounter, nil + return qb.Find(ctx, id) } -func (qb *sceneQueryBuilder) DecrementOCounter(id int) (int, error) { - _, err := qb.tx.Exec( - `UPDATE scenes SET o_counter = o_counter - 1 WHERE scenes.id = ? 
and scenes.o_counter > 0`, - id, - ) - if err != nil { - return 0, err +func (qb *SceneStore) Update(ctx context.Context, updatedObject *models.Scene) error { + var r sceneRow + r.fromScene(*updatedObject) + + if err := qb.tableMgr.updateByID(ctx, updatedObject.ID, r); err != nil { + return err } - scene, err := qb.find(id) - if err != nil { - return 0, err + if updatedObject.PerformerIDs.Loaded() { + if err := scenesPerformersTableMgr.replaceJoins(ctx, updatedObject.ID, updatedObject.PerformerIDs.List()); err != nil { + return err + } } - return scene.OCounter, nil + if updatedObject.TagIDs.Loaded() { + if err := scenesTagsTableMgr.replaceJoins(ctx, updatedObject.ID, updatedObject.TagIDs.List()); err != nil { + return err + } + } + + if updatedObject.GalleryIDs.Loaded() { + if err := scenesGalleriesTableMgr.replaceJoins(ctx, updatedObject.ID, updatedObject.GalleryIDs.List()); err != nil { + return err + } + } + + if updatedObject.StashIDs.Loaded() { + if err := scenesStashIDsTableMgr.replaceJoins(ctx, updatedObject.ID, updatedObject.StashIDs.List()); err != nil { + return err + } + } + + if updatedObject.Movies.Loaded() { + if err := scenesMoviesTableMgr.replaceJoins(ctx, updatedObject.ID, updatedObject.Movies.List()); err != nil { + return err + } + } + + if updatedObject.Files.Loaded() { + fileIDs := make([]file.ID, len(updatedObject.Files.List())) + for i, f := range updatedObject.Files.List() { + fileIDs[i] = f.ID + } + + if err := scenesFilesTableMgr.replaceJoins(ctx, updatedObject.ID, fileIDs); err != nil { + return err + } + } + + return nil } -func (qb *sceneQueryBuilder) ResetOCounter(id int) (int, error) { - _, err := qb.tx.Exec( - `UPDATE scenes SET o_counter = 0 WHERE scenes.id = ?`, - id, - ) - if err != nil { - return 0, err - } - - scene, err := qb.find(id) - if err != nil { - return 0, err - } - - return scene.OCounter, nil -} - -func (qb *sceneQueryBuilder) Destroy(id int) error { +func (qb *SceneStore) Destroy(ctx context.Context, id int) error { // delete all related table rows // TODO - this should be handled by a delete cascade - if err := qb.performersRepository().destroy([]int{id}); err != nil { + if err := qb.performersRepository().destroy(ctx, []int{id}); err != nil { return err } // scene markers should be handled prior to calling destroy // galleries should be handled prior to calling destroy - return qb.destroyExisting([]int{id}) + return qb.tableMgr.destroyExisting(ctx, []int{id}) } -func (qb *sceneQueryBuilder) Find(id int) (*models.Scene, error) { - return qb.find(id) +func (qb *SceneStore) Find(ctx context.Context, id int) (*models.Scene, error) { + return qb.find(ctx, id) } -func (qb *sceneQueryBuilder) FindMany(ids []int) ([]*models.Scene, error) { - var scenes []*models.Scene - for _, id := range ids { - scene, err := qb.Find(id) - if err != nil { - return nil, err - } +func (qb *SceneStore) FindMany(ctx context.Context, ids []int) ([]*models.Scene, error) { + table := qb.table() + q := qb.selectDataset().Prepared(true).Where(table.Col(idColumn).In(ids)) + unsorted, err := qb.getMany(ctx, q) + if err != nil { + return nil, err + } - if scene == nil { - return nil, fmt.Errorf("scene with id %d not found", id) - } + scenes := make([]*models.Scene, len(ids)) - scenes = append(scenes, scene) + for _, s := range unsorted { + i := intslice.IntIndex(ids, s.ID) + scenes[i] = s + } + + for i := range scenes { + if scenes[i] == nil { + return nil, fmt.Errorf("scene with id %d not found", ids[i]) + } } return scenes, nil } -func (qb *sceneQueryBuilder) find(id int) 
(*models.Scene, error) { - var ret models.Scene - if err := qb.get(id, &ret); err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, nil - } +func (qb *SceneStore) selectDataset() *goqu.SelectDataset { + table := qb.table() + files := fileTableMgr.table + folders := folderTableMgr.table + checksum := fingerprintTableMgr.table.As("fingerprint_md5") + oshash := fingerprintTableMgr.table.As("fingerprint_oshash") + + return dialect.From(table).LeftJoin( + scenesFilesJoinTable, + goqu.On( + scenesFilesJoinTable.Col(sceneIDColumn).Eq(table.Col(idColumn)), + scenesFilesJoinTable.Col("primary").Eq(1), + ), + ).LeftJoin( + files, + goqu.On(files.Col(idColumn).Eq(scenesFilesJoinTable.Col(fileIDColumn))), + ).LeftJoin( + folders, + goqu.On(folders.Col(idColumn).Eq(files.Col("parent_folder_id"))), + ).LeftJoin( + checksum, + goqu.On( + checksum.Col(fileIDColumn).Eq(scenesFilesJoinTable.Col(fileIDColumn)), + checksum.Col("type").Eq(file.FingerprintTypeMD5), + ), + ).LeftJoin( + oshash, + goqu.On( + oshash.Col(fileIDColumn).Eq(scenesFilesJoinTable.Col(fileIDColumn)), + oshash.Col("type").Eq(file.FingerprintTypeOshash), + ), + ).Select( + qb.table().All(), + scenesFilesJoinTable.Col(fileIDColumn).As("primary_file_id"), + folders.Col("path").As("primary_file_folder_path"), + files.Col("basename").As("primary_file_basename"), + checksum.Col("fingerprint").As("primary_file_checksum"), + oshash.Col("fingerprint").As("primary_file_oshash"), + ) +} + +func (qb *SceneStore) get(ctx context.Context, q *goqu.SelectDataset) (*models.Scene, error) { + ret, err := qb.getMany(ctx, q) + if err != nil { return nil, err } - return &ret, nil + + if len(ret) == 0 { + return nil, sql.ErrNoRows + } + + return ret[0], nil } -func (qb *sceneQueryBuilder) FindByChecksum(checksum string) (*models.Scene, error) { - query := "SELECT * FROM scenes WHERE checksum = ? LIMIT 1" - args := []interface{}{checksum} - return qb.queryScene(query, args) +func (qb *SceneStore) getMany(ctx context.Context, q *goqu.SelectDataset) ([]*models.Scene, error) { + const single = false + var ret []*models.Scene + var lastID int + if err := queryFunc(ctx, q, single, func(r *sqlx.Rows) error { + var f sceneQueryRow + if err := r.StructScan(&f); err != nil { + return err + } + + s := f.resolve() + if s.ID == lastID { + return fmt.Errorf("internal error: multiple rows returned for single scene id %d", s.ID) + } + lastID = s.ID + + ret = append(ret, s) + return nil + }); err != nil { + return nil, err + } + + return ret, nil } -func (qb *sceneQueryBuilder) FindByOSHash(oshash string) (*models.Scene, error) { - query := "SELECT * FROM scenes WHERE oshash = ? LIMIT 1" - args := []interface{}{oshash} - return qb.queryScene(query, args) +func (qb *SceneStore) GetFiles(ctx context.Context, id int) ([]*file.VideoFile, error) { + fileIDs, err := qb.filesRepository().get(ctx, id) + if err != nil { + return nil, err + } + + // use fileStore to load files + files, err := qb.fileStore.Find(ctx, fileIDs...) + if err != nil { + return nil, err + } + + ret := make([]*file.VideoFile, len(files)) + for i, f := range files { + var ok bool + ret[i], ok = f.(*file.VideoFile) + if !ok { + return nil, fmt.Errorf("expected file to be *file.VideoFile not %T", f) + } + } + + return ret, nil } -func (qb *sceneQueryBuilder) FindByPath(path string) (*models.Scene, error) { - query := selectAll(sceneTable) + "WHERE path = ? 
LIMIT 1" - args := []interface{}{path} - return qb.queryScene(query, args) +func (qb *SceneStore) GetManyFileIDs(ctx context.Context, ids []int) ([][]file.ID, error) { + const primaryOnly = false + return qb.filesRepository().getMany(ctx, ids, primaryOnly) } -func (qb *sceneQueryBuilder) FindByPerformerID(performerID int) ([]*models.Scene, error) { - args := []interface{}{performerID} - return qb.queryScenes(scenesForPerformerQuery, args) +func (qb *SceneStore) find(ctx context.Context, id int) (*models.Scene, error) { + q := qb.selectDataset().Where(qb.tableMgr.byID(id)) + + ret, err := qb.get(ctx, q) + if err != nil { + return nil, fmt.Errorf("getting scene by id %d: %w", id, err) + } + + return ret, nil } -func (qb *sceneQueryBuilder) FindByGalleryID(galleryID int) ([]*models.Scene, error) { - args := []interface{}{galleryID} - return qb.queryScenes(scenesForGalleryQuery, args) +func (qb *SceneStore) FindByFileID(ctx context.Context, fileID file.ID) ([]*models.Scene, error) { + sq := dialect.From(scenesFilesJoinTable).Select(scenesFilesJoinTable.Col(sceneIDColumn)).Where( + scenesFilesJoinTable.Col(fileIDColumn).Eq(fileID), + ) + + ret, err := qb.findBySubquery(ctx, sq) + if err != nil { + return nil, fmt.Errorf("getting scenes by file id %d: %w", fileID, err) + } + + return ret, nil } -func (qb *sceneQueryBuilder) CountByPerformerID(performerID int) (int, error) { - args := []interface{}{performerID} - return qb.runCountQuery(qb.buildCountQuery(countScenesForPerformerQuery), args) +func (qb *SceneStore) CountByFileID(ctx context.Context, fileID file.ID) (int, error) { + joinTable := scenesFilesJoinTable + + q := dialect.Select(goqu.COUNT("*")).From(joinTable).Where(joinTable.Col(fileIDColumn).Eq(fileID)) + return count(ctx, q) } -func (qb *sceneQueryBuilder) FindByMovieID(movieID int) ([]*models.Scene, error) { - args := []interface{}{movieID} - return qb.queryScenes(scenesForMovieQuery, args) +func (qb *SceneStore) FindByFingerprints(ctx context.Context, fp []file.Fingerprint) ([]*models.Scene, error) { + fingerprintTable := fingerprintTableMgr.table + + var ex []exp.Expression + + for _, v := range fp { + ex = append(ex, goqu.And( + fingerprintTable.Col("type").Eq(v.Type), + fingerprintTable.Col("fingerprint").Eq(v.Fingerprint), + )) + } + + sq := dialect.From(scenesFilesJoinTable). + InnerJoin( + fingerprintTable, + goqu.On(fingerprintTable.Col(fileIDColumn).Eq(scenesFilesJoinTable.Col(fileIDColumn))), + ). 
+ Select(scenesFilesJoinTable.Col(sceneIDColumn)).Where(goqu.Or(ex...)) + + ret, err := qb.findBySubquery(ctx, sq) + if err != nil { + return nil, fmt.Errorf("getting scenes by fingerprints: %w", err) + } + + return ret, nil } -func (qb *sceneQueryBuilder) CountByMovieID(movieID int) (int, error) { - args := []interface{}{movieID} - return qb.runCountQuery(qb.buildCountQuery(scenesForMovieQuery), args) +func (qb *SceneStore) FindByChecksum(ctx context.Context, checksum string) ([]*models.Scene, error) { + return qb.FindByFingerprints(ctx, []file.Fingerprint{ + { + Type: file.FingerprintTypeMD5, + Fingerprint: checksum, + }, + }) } -func (qb *sceneQueryBuilder) Count() (int, error) { - return qb.runCountQuery(qb.buildCountQuery("SELECT scenes.id FROM scenes"), nil) +func (qb *SceneStore) FindByOSHash(ctx context.Context, oshash string) ([]*models.Scene, error) { + return qb.FindByFingerprints(ctx, []file.Fingerprint{ + { + Type: file.FingerprintTypeOshash, + Fingerprint: oshash, + }, + }) } -func (qb *sceneQueryBuilder) Size() (float64, error) { - return qb.runSumQuery("SELECT SUM(cast(size as double)) as sum FROM scenes", nil) +func (qb *SceneStore) FindByPath(ctx context.Context, p string) ([]*models.Scene, error) { + filesTable := fileTableMgr.table + foldersTable := folderTableMgr.table + basename := filepath.Base(p) + dir := filepath.Dir(p) + + // replace wildcards + basename = strings.ReplaceAll(basename, "*", "%") + dir = strings.ReplaceAll(dir, "*", "%") + + sq := dialect.From(scenesFilesJoinTable).InnerJoin( + filesTable, + goqu.On(filesTable.Col(idColumn).Eq(scenesFilesJoinTable.Col(fileIDColumn))), + ).InnerJoin( + foldersTable, + goqu.On(foldersTable.Col(idColumn).Eq(filesTable.Col("parent_folder_id"))), + ).Select(scenesFilesJoinTable.Col(sceneIDColumn)).Where( + foldersTable.Col("path").Like(dir), + filesTable.Col("basename").Like(basename), + ) + + ret, err := qb.findBySubquery(ctx, sq) + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("getting scene by path %s: %w", p, err) + } + + return ret, nil } -func (qb *sceneQueryBuilder) Duration() (float64, error) { - return qb.runSumQuery("SELECT SUM(cast(duration as double)) as sum FROM scenes", nil) +func (qb *SceneStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]*models.Scene, error) { + table := qb.table() + + q := qb.selectDataset().Where( + table.Col(idColumn).Eq( + sq, + ), + ) + + return qb.getMany(ctx, q) } -func (qb *sceneQueryBuilder) CountByStudioID(studioID int) (int, error) { - args := []interface{}{studioID} - return qb.runCountQuery(qb.buildCountQuery(scenesForStudioQuery), args) +func (qb *SceneStore) FindByPerformerID(ctx context.Context, performerID int) ([]*models.Scene, error) { + sq := dialect.From(scenesPerformersJoinTable).Select(scenesPerformersJoinTable.Col(sceneIDColumn)).Where( + scenesPerformersJoinTable.Col(performerIDColumn).Eq(performerID), + ) + ret, err := qb.findBySubquery(ctx, sq) + + if err != nil { + return nil, fmt.Errorf("getting scenes for performer %d: %w", performerID, err) + } + + return ret, nil } -func (qb *sceneQueryBuilder) CountByTagID(tagID int) (int, error) { - args := []interface{}{tagID} - return qb.runCountQuery(qb.buildCountQuery(countScenesForTagQuery), args) +func (qb *SceneStore) FindByGalleryID(ctx context.Context, galleryID int) ([]*models.Scene, error) { + sq := dialect.From(galleriesScenesJoinTable).Select(galleriesScenesJoinTable.Col(sceneIDColumn)).Where( + galleriesScenesJoinTable.Col(galleryIDColumn).Eq(galleryID), + ) 
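
Almost every `FindByX` lookup above reduces to the same shape via `findBySubquery`: a join-table SELECT yields scene ids, and the main select dataset is filtered against it, so each lookup stays a single query. A standalone sketch of that shape, writing the table and column names out literally rather than going through the table-manager globals:

```go
package main

import (
	"fmt"

	"github.com/doug-martin/goqu/v9"
	_ "github.com/doug-martin/goqu/v9/dialect/sqlite3"
)

// scenesForPerformerSQL mirrors FindByPerformerID + findBySubquery:
// select scene_id from the join table, then restrict scenes.id to that set.
func scenesForPerformerSQL(performerID int) (string, []interface{}, error) {
	dialect := goqu.Dialect("sqlite3")

	sub := dialect.From("performers_scenes").
		Select("scene_id").
		Where(goqu.C("performer_id").Eq(performerID))

	return dialect.From("scenes").
		Prepared(true).
		Select(goqu.T("scenes").All()).
		Where(goqu.T("scenes").Col("id").In(sub)).
		ToSQL()
}

func main() {
	sql, args, _ := scenesForPerformerSQL(3)
	fmt.Println(sql, args)
}
```

The patch filters with `Eq` against the subquery dataset; the sketch uses `In`, which renders the familiar `id IN (SELECT …)` form.
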
+ ret, err := qb.findBySubquery(ctx, sq) + + if err != nil { + return nil, fmt.Errorf("getting scenes for gallery %d: %w", galleryID, err) + } + + return ret, nil +} + +func (qb *SceneStore) CountByPerformerID(ctx context.Context, performerID int) (int, error) { + joinTable := scenesPerformersJoinTable + + q := dialect.Select(goqu.COUNT("*")).From(joinTable).Where(joinTable.Col(performerIDColumn).Eq(performerID)) + return count(ctx, q) +} + +func (qb *SceneStore) FindByMovieID(ctx context.Context, movieID int) ([]*models.Scene, error) { + sq := dialect.From(scenesMoviesJoinTable).Select(scenesMoviesJoinTable.Col(sceneIDColumn)).Where( + scenesMoviesJoinTable.Col(movieIDColumn).Eq(movieID), + ) + ret, err := qb.findBySubquery(ctx, sq) + + if err != nil { + return nil, fmt.Errorf("getting scenes for movie %d: %w", movieID, err) + } + + return ret, nil +} + +func (qb *SceneStore) CountByMovieID(ctx context.Context, movieID int) (int, error) { + joinTable := scenesMoviesJoinTable + + q := dialect.Select(goqu.COUNT("*")).From(joinTable).Where(joinTable.Col(movieIDColumn).Eq(movieID)) + return count(ctx, q) +} + +func (qb *SceneStore) Count(ctx context.Context) (int, error) { + q := dialect.Select(goqu.COUNT("*")).From(qb.table()) + return count(ctx, q) +} + +func (qb *SceneStore) Size(ctx context.Context) (float64, error) { + table := qb.table() + fileTable := fileTableMgr.table + q := dialect.Select( + goqu.SUM(fileTableMgr.table.Col("size")), + ).From(table).InnerJoin( + scenesFilesJoinTable, + goqu.On(table.Col(idColumn).Eq(scenesFilesJoinTable.Col(sceneIDColumn))), + ).InnerJoin( + fileTable, + goqu.On(scenesFilesJoinTable.Col(fileIDColumn).Eq(fileTable.Col(idColumn))), + ) + var ret float64 + if err := querySimple(ctx, q, &ret); err != nil { + return 0, err + } + + return ret, nil +} + +func (qb *SceneStore) Duration(ctx context.Context) (float64, error) { + table := qb.table() + videoFileTable := videoFileTableMgr.table + + q := dialect.Select( + goqu.SUM(videoFileTable.Col("duration"))).From(table).InnerJoin( + scenesFilesJoinTable, + goqu.On(scenesFilesJoinTable.Col("scene_id").Eq(table.Col(idColumn))), + ).InnerJoin( + videoFileTable, + goqu.On(videoFileTable.Col("file_id").Eq(scenesFilesJoinTable.Col("file_id"))), + ) + + var ret float64 + if err := querySimple(ctx, q, &ret); err != nil { + return 0, err + } + + return ret, nil +} + +func (qb *SceneStore) CountByStudioID(ctx context.Context, studioID int) (int, error) { + table := qb.table() + + q := dialect.Select(goqu.COUNT("*")).From(table).Where(table.Col(studioIDColumn).Eq(studioID)) + return count(ctx, q) +} + +func (qb *SceneStore) CountByTagID(ctx context.Context, tagID int) (int, error) { + joinTable := scenesTagsJoinTable + + q := dialect.Select(goqu.COUNT("*")).From(joinTable).Where(joinTable.Col(tagIDColumn).Eq(tagID)) + return count(ctx, q) +} + +func (qb *SceneStore) countMissingFingerprints(ctx context.Context, fpType string) (int, error) { + fpTable := fingerprintTableMgr.table.As("fingerprints_temp") + + q := dialect.From(scenesFilesJoinTable).LeftJoin( + fpTable, + goqu.On( + scenesFilesJoinTable.Col(fileIDColumn).Eq(fpTable.Col(fileIDColumn)), + fpTable.Col("type").Eq(fpType), + ), + ).Select(goqu.COUNT(goqu.DISTINCT(scenesFilesJoinTable.Col(sceneIDColumn)))).Where(fpTable.Col("fingerprint").IsNull()) + + return count(ctx, q) } // CountMissingChecksum returns the number of scenes missing a checksum value. 
-func (qb *sceneQueryBuilder) CountMissingChecksum() (int, error) { - return qb.runCountQuery(qb.buildCountQuery(countScenesForMissingChecksumQuery), []interface{}{}) +func (qb *SceneStore) CountMissingChecksum(ctx context.Context) (int, error) { + return qb.countMissingFingerprints(ctx, "md5") } // CountMissingOSHash returns the number of scenes missing an oshash value. -func (qb *sceneQueryBuilder) CountMissingOSHash() (int, error) { - return qb.runCountQuery(qb.buildCountQuery(countScenesForMissingOSHashQuery), []interface{}{}) +func (qb *SceneStore) CountMissingOSHash(ctx context.Context) (int, error) { + return qb.countMissingFingerprints(ctx, "oshash") } -func (qb *sceneQueryBuilder) Wall(q *string) ([]*models.Scene, error) { +func (qb *SceneStore) Wall(ctx context.Context, q *string) ([]*models.Scene, error) { s := "" if q != nil { s = *q } - query := selectAll(sceneTable) + "WHERE scenes.details LIKE '%" + s + "%' ORDER BY RANDOM() LIMIT 80" - return qb.queryScenes(query, nil) + + table := qb.table() + qq := qb.selectDataset().Prepared(true).Where(table.Col("details").Like("%" + s + "%")).Order(goqu.L("RANDOM()").Asc()).Limit(80) + return qb.getMany(ctx, qq) } -func (qb *sceneQueryBuilder) All() ([]*models.Scene, error) { - return qb.queryScenes(selectAll(sceneTable)+qb.getDefaultSceneSort(), nil) +func (qb *SceneStore) All(ctx context.Context) ([]*models.Scene, error) { + table := qb.table() + fileTable := fileTableMgr.table + folderTable := folderTableMgr.table + + return qb.getMany(ctx, qb.selectDataset().Order( + folderTable.Col("path").Asc(), + fileTable.Col("basename").Asc(), + table.Col("date").Asc(), + )) } func illegalFilterCombination(type1, type2 string) error { return fmt.Errorf("cannot have %s and %s in the same filter", type1, type2) } -func (qb *sceneQueryBuilder) validateFilter(sceneFilter *models.SceneFilterType) error { +func (qb *SceneStore) validateFilter(sceneFilter *models.SceneFilterType) error { const and = "AND" const or = "OR" const not = "NOT" @@ -371,61 +778,111 @@ func (qb *sceneQueryBuilder) validateFilter(sceneFilter *models.SceneFilterType) return nil } -func (qb *sceneQueryBuilder) makeFilter(sceneFilter *models.SceneFilterType) *filterBuilder { +func (qb *SceneStore) makeFilter(ctx context.Context, sceneFilter *models.SceneFilterType) *filterBuilder { query := &filterBuilder{} if sceneFilter.And != nil { - query.and(qb.makeFilter(sceneFilter.And)) + query.and(qb.makeFilter(ctx, sceneFilter.And)) } if sceneFilter.Or != nil { - query.or(qb.makeFilter(sceneFilter.Or)) + query.or(qb.makeFilter(ctx, sceneFilter.Or)) } if sceneFilter.Not != nil { - query.not(qb.makeFilter(sceneFilter.Not)) + query.not(qb.makeFilter(ctx, sceneFilter.Not)) } - query.handleCriterion(stringCriterionHandler(sceneFilter.Path, "scenes.path")) - query.handleCriterion(stringCriterionHandler(sceneFilter.Title, "scenes.title")) - query.handleCriterion(stringCriterionHandler(sceneFilter.Details, "scenes.details")) - query.handleCriterion(stringCriterionHandler(sceneFilter.Oshash, "scenes.oshash")) - query.handleCriterion(stringCriterionHandler(sceneFilter.Checksum, "scenes.checksum")) - query.handleCriterion(phashCriterionHandler(sceneFilter.Phash)) - query.handleCriterion(intCriterionHandler(sceneFilter.Rating, "scenes.rating")) - query.handleCriterion(intCriterionHandler(sceneFilter.OCounter, "scenes.o_counter")) - query.handleCriterion(boolCriterionHandler(sceneFilter.Organized, "scenes.organized")) - query.handleCriterion(durationCriterionHandler(sceneFilter.Duration, 
"scenes.duration")) - query.handleCriterion(resolutionCriterionHandler(sceneFilter.Resolution, "scenes.height", "scenes.width")) - query.handleCriterion(hasMarkersCriterionHandler(sceneFilter.HasMarkers)) - query.handleCriterion(sceneIsMissingCriterionHandler(qb, sceneFilter.IsMissing)) - query.handleCriterion(stringCriterionHandler(sceneFilter.URL, "scenes.url")) + query.handleCriterion(ctx, pathCriterionHandler(sceneFilter.Path, "folders.path", "files.basename", qb.addFoldersTable)) + query.handleCriterion(ctx, sceneFileCountCriterionHandler(qb, sceneFilter.FileCount)) + query.handleCriterion(ctx, stringCriterionHandler(sceneFilter.Title, "scenes.title")) + query.handleCriterion(ctx, stringCriterionHandler(sceneFilter.Details, "scenes.details")) + query.handleCriterion(ctx, criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) { + if sceneFilter.Oshash != nil { + qb.addSceneFilesTable(f) + f.addLeftJoin(fingerprintTable, "fingerprints_oshash", "scenes_files.file_id = fingerprints_oshash.file_id AND fingerprints_oshash.type = 'oshash'") + } - query.handleCriterion(criterionHandlerFunc(func(f *filterBuilder) { - if sceneFilter.StashID != nil { - qb.stashIDRepository().join(f, "scene_stash_ids", "scenes.id") - stringCriterionHandler(sceneFilter.StashID, "scene_stash_ids.stash_id")(f) + stringCriterionHandler(sceneFilter.Oshash, "fingerprints_oshash.fingerprint")(ctx, f) + })) + + query.handleCriterion(ctx, criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) { + if sceneFilter.Checksum != nil { + qb.addSceneFilesTable(f) + f.addLeftJoin(fingerprintTable, "fingerprints_md5", "scenes_files.file_id = fingerprints_md5.file_id AND fingerprints_md5.type = 'md5'") + } + + stringCriterionHandler(sceneFilter.Checksum, "fingerprints_md5.fingerprint")(ctx, f) + })) + + query.handleCriterion(ctx, criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) { + if sceneFilter.Phash != nil { + qb.addSceneFilesTable(f) + f.addLeftJoin(fingerprintTable, "fingerprints_phash", "scenes_files.file_id = fingerprints_phash.file_id AND fingerprints_phash.type = 'phash'") + + value, _ := utils.StringToPhash(sceneFilter.Phash.Value) + intCriterionHandler(&models.IntCriterionInput{ + Value: int(value), + Modifier: sceneFilter.Phash.Modifier, + }, "fingerprints_phash.fingerprint", nil)(ctx, f) } })) - query.handleCriterion(boolCriterionHandler(sceneFilter.Interactive, "scenes.interactive")) - query.handleCriterion(intCriterionHandler(sceneFilter.InteractiveSpeed, "scenes.interactive_speed")) + query.handleCriterion(ctx, intCriterionHandler(sceneFilter.Rating, "scenes.rating", nil)) + query.handleCriterion(ctx, intCriterionHandler(sceneFilter.OCounter, "scenes.o_counter", nil)) + query.handleCriterion(ctx, boolCriterionHandler(sceneFilter.Organized, "scenes.organized", nil)) - query.handleCriterion(sceneCaptionCriterionHandler(qb, sceneFilter.Captions)) + query.handleCriterion(ctx, durationCriterionHandler(sceneFilter.Duration, "video_files.duration", qb.addVideoFilesTable)) + query.handleCriterion(ctx, resolutionCriterionHandler(sceneFilter.Resolution, "video_files.height", "video_files.width", qb.addVideoFilesTable)) - query.handleCriterion(sceneTagsCriterionHandler(qb, sceneFilter.Tags)) - query.handleCriterion(sceneTagCountCriterionHandler(qb, sceneFilter.TagCount)) - query.handleCriterion(scenePerformersCriterionHandler(qb, sceneFilter.Performers)) - query.handleCriterion(scenePerformerCountCriterionHandler(qb, sceneFilter.PerformerCount)) - 
query.handleCriterion(sceneStudioCriterionHandler(qb, sceneFilter.Studios)) - query.handleCriterion(sceneMoviesCriterionHandler(qb, sceneFilter.Movies)) - query.handleCriterion(scenePerformerTagsCriterionHandler(qb, sceneFilter.PerformerTags)) - query.handleCriterion(scenePerformerFavoriteCriterionHandler(sceneFilter.PerformerFavorite)) - query.handleCriterion(scenePerformerAgeCriterionHandler(sceneFilter.PerformerAge)) - query.handleCriterion(scenePhashDuplicatedCriterionHandler(sceneFilter.Duplicated)) + query.handleCriterion(ctx, hasMarkersCriterionHandler(sceneFilter.HasMarkers)) + query.handleCriterion(ctx, sceneIsMissingCriterionHandler(qb, sceneFilter.IsMissing)) + query.handleCriterion(ctx, stringCriterionHandler(sceneFilter.URL, "scenes.url")) + + query.handleCriterion(ctx, criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) { + if sceneFilter.StashID != nil { + qb.stashIDRepository().join(f, "scene_stash_ids", "scenes.id") + stringCriterionHandler(sceneFilter.StashID, "scene_stash_ids.stash_id")(ctx, f) + } + })) + + query.handleCriterion(ctx, boolCriterionHandler(sceneFilter.Interactive, "video_files.interactive", qb.addVideoFilesTable)) + query.handleCriterion(ctx, intCriterionHandler(sceneFilter.InteractiveSpeed, "video_files.interactive_speed", qb.addVideoFilesTable)) + + query.handleCriterion(ctx, sceneCaptionCriterionHandler(qb, sceneFilter.Captions)) + + query.handleCriterion(ctx, sceneTagsCriterionHandler(qb, sceneFilter.Tags)) + query.handleCriterion(ctx, sceneTagCountCriterionHandler(qb, sceneFilter.TagCount)) + query.handleCriterion(ctx, scenePerformersCriterionHandler(qb, sceneFilter.Performers)) + query.handleCriterion(ctx, scenePerformerCountCriterionHandler(qb, sceneFilter.PerformerCount)) + query.handleCriterion(ctx, sceneStudioCriterionHandler(qb, sceneFilter.Studios)) + query.handleCriterion(ctx, sceneMoviesCriterionHandler(qb, sceneFilter.Movies)) + query.handleCriterion(ctx, scenePerformerTagsCriterionHandler(qb, sceneFilter.PerformerTags)) + query.handleCriterion(ctx, scenePerformerFavoriteCriterionHandler(sceneFilter.PerformerFavorite)) + query.handleCriterion(ctx, scenePerformerAgeCriterionHandler(sceneFilter.PerformerAge)) + query.handleCriterion(ctx, scenePhashDuplicatedCriterionHandler(sceneFilter.Duplicated, qb.addSceneFilesTable)) return query } -func (qb *sceneQueryBuilder) Query(options models.SceneQueryOptions) (*models.SceneQueryResult, error) { +func (qb *SceneStore) addSceneFilesTable(f *filterBuilder) { + f.addLeftJoin(scenesFilesTable, "", "scenes_files.scene_id = scenes.id") +} + +func (qb *SceneStore) addFilesTable(f *filterBuilder) { + qb.addSceneFilesTable(f) + f.addLeftJoin(fileTable, "", "scenes_files.file_id = files.id") +} + +func (qb *SceneStore) addFoldersTable(f *filterBuilder) { + qb.addFilesTable(f) + f.addLeftJoin(folderTable, "", "files.parent_folder_id = folders.id") +} + +func (qb *SceneStore) addVideoFilesTable(f *filterBuilder) { + qb.addSceneFilesTable(f) + f.addLeftJoin(videoFileTable, "", "video_files.file_id = scenes_files.file_id") +} + +func (qb *SceneStore) Query(ctx context.Context, options models.SceneQueryOptions) (*models.SceneQueryResult, error) { sceneFilter := options.SceneFilter findFilter := options.FindFilter @@ -440,27 +897,49 @@ func (qb *sceneQueryBuilder) Query(options models.SceneQueryOptions) (*models.Sc distinctIDs(&query, sceneTable) if q := findFilter.Q; q != nil && *q != "" { - query.join("scene_markers", "", "scene_markers.scene_id = scenes.id") - searchColumns := 
[]string{"scenes.title", "scenes.details", "scenes.path", "scenes.oshash", "scenes.checksum", "scene_markers.title"} + query.addJoins( + join{ + table: scenesFilesTable, + onClause: "scenes_files.scene_id = scenes.id", + }, + join{ + table: fileTable, + onClause: "scenes_files.file_id = files.id", + }, + join{ + table: folderTable, + onClause: "files.parent_folder_id = folders.id", + }, + join{ + table: fingerprintTable, + onClause: "files_fingerprints.file_id = scenes_files.file_id", + }, + join{ + table: sceneMarkerTable, + onClause: "scene_markers.scene_id = scenes.id", + }, + ) + + searchColumns := []string{"scenes.title", "scenes.details", "folders.path", "files.basename", "files_fingerprints.fingerprint", "scene_markers.title"} query.parseQueryString(searchColumns, *q) } if err := qb.validateFilter(sceneFilter); err != nil { return nil, err } - filter := qb.makeFilter(sceneFilter) + filter := qb.makeFilter(ctx, sceneFilter) query.addFilter(filter) qb.setSceneSort(&query, findFilter) query.sortAndPagination += getPagination(findFilter) - result, err := qb.queryGroupedFields(options, query) + result, err := qb.queryGroupedFields(ctx, options, query) if err != nil { return nil, fmt.Errorf("error querying aggregate fields: %w", err) } - idsResult, err := query.findIDs() + idsResult, err := query.findIDs(ctx) if err != nil { return nil, fmt.Errorf("error finding IDs: %w", err) } @@ -469,7 +948,7 @@ func (qb *sceneQueryBuilder) Query(options models.SceneQueryOptions) (*models.Sc return result, nil } -func (qb *sceneQueryBuilder) queryGroupedFields(options models.SceneQueryOptions, query queryBuilder) (*models.SceneQueryResult, error) { +func (qb *SceneStore) queryGroupedFields(ctx context.Context, options models.SceneQueryOptions, query queryBuilder) (*models.SceneQueryResult, error) { if !options.Count && !options.TotalDuration && !options.TotalSize { // nothing to do - return empty result return models.NewSceneQueryResult(qb), nil @@ -482,13 +961,33 @@ func (qb *sceneQueryBuilder) queryGroupedFields(options models.SceneQueryOptions } if options.TotalDuration { - query.addColumn("COALESCE(scenes.duration, 0) as duration") - aggregateQuery.addColumn("COALESCE(SUM(temp.duration), 0) as duration") + query.addJoins( + join{ + table: scenesFilesTable, + onClause: "scenes_files.scene_id = scenes.id", + }, + join{ + table: videoFileTable, + onClause: "scenes_files.file_id = video_files.file_id", + }, + ) + query.addColumn("COALESCE(video_files.duration, 0) as duration") + aggregateQuery.addColumn("SUM(temp.duration) as duration") } if options.TotalSize { - query.addColumn("COALESCE(scenes.size, 0) as size") - aggregateQuery.addColumn("COALESCE(SUM(temp.size), 0) as size") + query.addJoins( + join{ + table: scenesFilesTable, + onClause: "scenes_files.scene_id = scenes.id", + }, + join{ + table: fileTable, + onClause: "scenes_files.file_id = files.id", + }, + ) + query.addColumn("COALESCE(files.size, 0) as size") + aggregateQuery.addColumn("SUM(temp.size) as size") } const includeSortPagination = false @@ -496,70 +995,69 @@ func (qb *sceneQueryBuilder) queryGroupedFields(options models.SceneQueryOptions out := struct { Total int - Duration float64 - Size float64 + Duration null.Float + Size null.Float }{} - if err := qb.repository.queryStruct(aggregateQuery.toSQL(includeSortPagination), query.args, &out); err != nil { + if err := qb.repository.queryStruct(ctx, aggregateQuery.toSQL(includeSortPagination), query.args, &out); err != nil { return nil, err } ret := models.NewSceneQueryResult(qb) 
ret.Count = out.Total - ret.TotalDuration = out.Duration - ret.TotalSize = out.Size + ret.TotalDuration = out.Duration.Float64 + ret.TotalSize = out.Size.Float64 return ret, nil } -func phashCriterionHandler(phashFilter *models.StringCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { - if phashFilter != nil { - // convert value to int from hex - // ignore errors - value, _ := utils.StringToPhash(phashFilter.Value) - - if modifier := phashFilter.Modifier; phashFilter.Modifier.IsValid() { - switch modifier { - case models.CriterionModifierEquals: - f.addWhere("scenes.phash = ?", value) - case models.CriterionModifierNotEquals: - f.addWhere("scenes.phash != ?", value) - case models.CriterionModifierIsNull: - f.addWhere("scenes.phash IS NULL") - case models.CriterionModifierNotNull: - f.addWhere("scenes.phash IS NOT NULL") - } - } - } +func sceneFileCountCriterionHandler(qb *SceneStore, fileCount *models.IntCriterionInput) criterionHandlerFunc { + h := countCriterionHandlerBuilder{ + primaryTable: sceneTable, + joinTable: scenesFilesTable, + primaryFK: sceneIDColumn, } + + return h.handler(fileCount) } -func scenePhashDuplicatedCriterionHandler(duplicatedFilter *models.PHashDuplicationCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { +func scenePhashDuplicatedCriterionHandler(duplicatedFilter *models.PHashDuplicationCriterionInput, addJoinFn func(f *filterBuilder)) criterionHandlerFunc { + return func(ctx context.Context, f *filterBuilder) { // TODO: Wishlist item: Implement Distance matching if duplicatedFilter != nil { + if addJoinFn != nil { + addJoinFn(f) + } + var v string if *duplicatedFilter.Duplicated { v = ">" } else { v = "=" } - f.addInnerJoin("(SELECT id FROM scenes JOIN (SELECT phash FROM scenes GROUP BY phash HAVING COUNT(phash) "+v+" 1) dupes on scenes.phash = dupes.phash)", "scph", "scenes.id = scph.id") + + f.addInnerJoin("(SELECT file_id FROM files_fingerprints INNER JOIN (SELECT fingerprint FROM files_fingerprints WHERE type = 'phash' GROUP BY fingerprint HAVING COUNT (fingerprint) "+v+" 1) dupes on files_fingerprints.fingerprint = dupes.fingerprint)", "scph", "scenes_files.file_id = scph.file_id") } } } -func durationCriterionHandler(durationFilter *models.IntCriterionInput, column string) criterionHandlerFunc { - return func(f *filterBuilder) { +func durationCriterionHandler(durationFilter *models.IntCriterionInput, column string, addJoinFn func(f *filterBuilder)) criterionHandlerFunc { + return func(ctx context.Context, f *filterBuilder) { if durationFilter != nil { + if addJoinFn != nil { + addJoinFn(f) + } clause, args := getIntCriterionWhereClause("cast("+column+" as int)", *durationFilter) f.addWhere(clause, args...) 
} } } -func resolutionCriterionHandler(resolution *models.ResolutionCriterionInput, heightColumn string, widthColumn string) criterionHandlerFunc { - return func(f *filterBuilder) { +func resolutionCriterionHandler(resolution *models.ResolutionCriterionInput, heightColumn string, widthColumn string, addJoinFn func(f *filterBuilder)) criterionHandlerFunc { + return func(ctx context.Context, f *filterBuilder) { if resolution != nil && resolution.Value.IsValid() { + if addJoinFn != nil { + addJoinFn(f) + } + min := resolution.Value.GetMinResolution() max := resolution.Value.GetMaxResolution() @@ -580,7 +1078,7 @@ func resolutionCriterionHandler(resolution *models.ResolutionCriterionInput, hei } func hasMarkersCriterionHandler(hasMarkers *string) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if hasMarkers != nil { f.addLeftJoin("scene_markers", "", "scene_markers.scene_id = scenes.id") if *hasMarkers == "true" { @@ -592,8 +1090,8 @@ func hasMarkersCriterionHandler(hasMarkers *string) criterionHandlerFunc { } } -func sceneIsMissingCriterionHandler(qb *sceneQueryBuilder, isMissing *string) criterionHandlerFunc { - return func(f *filterBuilder) { +func sceneIsMissingCriterionHandler(qb *SceneStore, isMissing *string) criterionHandlerFunc { + return func(ctx context.Context, f *filterBuilder) { if isMissing != nil && *isMissing != "" { switch *isMissing { case "galleries": @@ -615,6 +1113,10 @@ func sceneIsMissingCriterionHandler(qb *sceneQueryBuilder, isMissing *string) cr case "stash_id": qb.stashIDRepository().join(f, "scene_stash_ids", "scenes.id") f.addWhere("scene_stash_ids.scene_id IS NULL") + case "phash": + qb.addSceneFilesTable(f) + f.addLeftJoin(fingerprintTable, "fingerprints_phash", "scenes_files.file_id = fingerprints_phash.file_id AND fingerprints_phash.type = 'phash'") + f.addWhere("fingerprints_phash.fingerprint IS NULL") default: f.addWhere("(scenes." + *isMissing + " IS NULL OR TRIM(scenes." 
+ *isMissing + ") = '')") } @@ -622,7 +1124,7 @@ func sceneIsMissingCriterionHandler(qb *sceneQueryBuilder, isMissing *string) cr } } -func (qb *sceneQueryBuilder) getMultiCriterionHandlerBuilder(foreignTable, joinTable, foreignFK string, addJoinsFunc func(f *filterBuilder)) multiCriterionHandlerBuilder { +func (qb *SceneStore) getMultiCriterionHandlerBuilder(foreignTable, joinTable, foreignFK string, addJoinsFunc func(f *filterBuilder)) multiCriterionHandlerBuilder { return multiCriterionHandlerBuilder{ primaryTable: sceneTable, foreignTable: foreignTable, @@ -633,19 +1135,20 @@ func (qb *sceneQueryBuilder) getMultiCriterionHandlerBuilder(foreignTable, joinT } } -func sceneCaptionCriterionHandler(qb *sceneQueryBuilder, captions *models.StringCriterionInput) criterionHandlerFunc { +func sceneCaptionCriterionHandler(qb *SceneStore, captions *models.StringCriterionInput) criterionHandlerFunc { h := stringListCriterionHandlerBuilder{ - joinTable: sceneCaptionsTable, - stringColumn: sceneCaptionCodeColumn, + joinTable: videoCaptionsTable, + stringColumn: captionCodeColumn, addJoinTable: func(f *filterBuilder) { - qb.captionRepository().join(f, "", "scenes.id") + qb.addSceneFilesTable(f) + f.addLeftJoin(videoCaptionsTable, "", "video_captions.file_id = scenes_files.file_id") }, } return h.handler(captions) } -func sceneTagsCriterionHandler(qb *sceneQueryBuilder, tags *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { +func sceneTagsCriterionHandler(qb *SceneStore, tags *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { h := joinedHierarchicalMultiCriterionHandlerBuilder{ tx: qb.tx, @@ -662,7 +1165,7 @@ func sceneTagsCriterionHandler(qb *sceneQueryBuilder, tags *models.HierarchicalM return h.handler(tags) } -func sceneTagCountCriterionHandler(qb *sceneQueryBuilder, tagCount *models.IntCriterionInput) criterionHandlerFunc { +func sceneTagCountCriterionHandler(qb *SceneStore, tagCount *models.IntCriterionInput) criterionHandlerFunc { h := countCriterionHandlerBuilder{ primaryTable: sceneTable, joinTable: scenesTagsTable, @@ -672,7 +1175,7 @@ func sceneTagCountCriterionHandler(qb *sceneQueryBuilder, tagCount *models.IntCr return h.handler(tagCount) } -func scenePerformersCriterionHandler(qb *sceneQueryBuilder, performers *models.MultiCriterionInput) criterionHandlerFunc { +func scenePerformersCriterionHandler(qb *SceneStore, performers *models.MultiCriterionInput) criterionHandlerFunc { h := joinedMultiCriterionHandlerBuilder{ primaryTable: sceneTable, joinTable: performersScenesTable, @@ -688,7 +1191,7 @@ func scenePerformersCriterionHandler(qb *sceneQueryBuilder, performers *models.M return h.handler(performers) } -func scenePerformerCountCriterionHandler(qb *sceneQueryBuilder, performerCount *models.IntCriterionInput) criterionHandlerFunc { +func scenePerformerCountCriterionHandler(qb *SceneStore, performerCount *models.IntCriterionInput) criterionHandlerFunc { h := countCriterionHandlerBuilder{ primaryTable: sceneTable, joinTable: performersScenesTable, @@ -699,7 +1202,7 @@ func scenePerformerCountCriterionHandler(qb *sceneQueryBuilder, performerCount * } func scenePerformerFavoriteCriterionHandler(performerfavorite *bool) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if performerfavorite != nil { f.addLeftJoin("performers_scenes", "", "scenes.id = performers_scenes.scene_id") @@ -719,7 +1222,7 @@ GROUP BY performers_scenes.scene_id HAVING SUM(performers.favorite) = 0)`, "nofa } func 
scenePerformerAgeCriterionHandler(performerAge *models.IntCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if performerAge != nil { f.addInnerJoin("performers_scenes", "", "scenes.id = performers_scenes.scene_id") f.addInnerJoin("performers", "", "performers_scenes.performer_id = performers.id") @@ -735,7 +1238,7 @@ func scenePerformerAgeCriterionHandler(performerAge *models.IntCriterionInput) c } } -func sceneStudioCriterionHandler(qb *sceneQueryBuilder, studios *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { +func sceneStudioCriterionHandler(qb *SceneStore, studios *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { h := hierarchicalMultiCriterionHandlerBuilder{ tx: qb.tx, @@ -749,7 +1252,7 @@ func sceneStudioCriterionHandler(qb *sceneQueryBuilder, studios *models.Hierarch return h.handler(studios) } -func sceneMoviesCriterionHandler(qb *sceneQueryBuilder, movies *models.MultiCriterionInput) criterionHandlerFunc { +func sceneMoviesCriterionHandler(qb *SceneStore, movies *models.MultiCriterionInput) criterionHandlerFunc { addJoinsFunc := func(f *filterBuilder) { qb.moviesRepository().join(f, "", "scenes.id") f.addLeftJoin("movies", "", "movies_scenes.movie_id = movies.id") @@ -758,8 +1261,8 @@ func sceneMoviesCriterionHandler(qb *sceneQueryBuilder, movies *models.MultiCrit return h.handler(movies) } -func scenePerformerTagsCriterionHandler(qb *sceneQueryBuilder, tags *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { +func scenePerformerTagsCriterionHandler(qb *SceneStore, tags *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { + return func(ctx context.Context, f *filterBuilder) { if tags != nil { if tags.Modifier == models.CriterionModifierIsNull || tags.Modifier == models.CriterionModifierNotNull { var notClause string @@ -778,7 +1281,7 @@ func scenePerformerTagsCriterionHandler(qb *sceneQueryBuilder, tags *models.Hier return } - valuesClause := getHierarchicalValues(qb.tx, tags.Value, tagTable, "tags_relations", "", tags.Depth) + valuesClause := getHierarchicalValues(ctx, qb.tx, tags.Value, tagTable, "tags_relations", "", tags.Depth) f.addWith(`performer_tags AS ( SELECT ps.scene_id, t.column1 AS root_tag_id FROM performers_scenes ps @@ -793,15 +1296,35 @@ INNER JOIN (` + valuesClause + `) t ON t.column2 = pt.tag_id } } -func (qb *sceneQueryBuilder) getDefaultSceneSort() string { - return " ORDER BY scenes.path, scenes.date ASC " -} - -func (qb *sceneQueryBuilder) setSceneSort(query *queryBuilder, findFilter *models.FindFilterType) { +func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindFilterType) { if findFilter == nil || findFilter.Sort == nil || *findFilter.Sort == "" { return } sort := findFilter.GetSort("title") + + addFileTable := func() { + query.addJoins( + join{ + table: scenesFilesTable, + onClause: "scenes_files.scene_id = scenes.id", + }, + join{ + table: fileTable, + onClause: "scenes_files.file_id = files.id", + }, + ) + } + + addVideoFileTable := func() { + addFileTable() + query.addJoins( + join{ + table: videoFileTable, + onClause: "video_files.file_id = scenes_files.file_id", + }, + ) + } + direction := findFilter.GetDirection() switch sort { case "movie_scene_number": @@ -811,29 +1334,57 @@ func (qb *sceneQueryBuilder) setSceneSort(query *queryBuilder, findFilter *model query.sortAndPagination += getCountSort(sceneTable, scenesTagsTable, sceneIDColumn, direction) case 
"performer_count": query.sortAndPagination += getCountSort(sceneTable, performersScenesTable, sceneIDColumn, direction) + case "file_count": + query.sortAndPagination += getCountSort(sceneTable, scenesFilesTable, sceneIDColumn, direction) + case "path": + // special handling for path + addFileTable() + query.addJoins( + join{ + table: folderTable, + onClause: "files.parent_folder_id = folders.id", + }, + ) + query.sortAndPagination += fmt.Sprintf(" ORDER BY folders.path %s, files.basename %[1]s", direction) + case "perceptual_similarity": + // special handling for phash + addFileTable() + query.addJoins( + join{ + table: fingerprintTable, + as: "fingerprints_phash", + onClause: "scenes_files.file_id = fingerprints_phash.file_id AND fingerprints_phash.type = 'phash'", + }, + ) + + query.sortAndPagination += " ORDER BY fingerprints_phash.fingerprint " + direction + ", files.size DESC" + case "bitrate": + sort = "bit_rate" + addVideoFileTable() + query.sortAndPagination += getSort(sort, direction, videoFileTable) + case "file_mod_time": + sort = "mod_time" + addFileTable() + query.sortAndPagination += getSort(sort, direction, fileTable) + case "framerate": + sort = "frame_rate" + addVideoFileTable() + query.sortAndPagination += getSort(sort, direction, videoFileTable) + case "filesize": + addFileTable() + query.sortAndPagination += getSort(sort, direction, fileTable) + case "duration": + addVideoFileTable() + query.sortAndPagination += getSort(sort, direction, videoFileTable) + case "interactive", "interactive_speed": + addVideoFileTable() + query.sortAndPagination += getSort(sort, direction, videoFileTable) default: query.sortAndPagination += getSort(sort, direction, "scenes") } } -func (qb *sceneQueryBuilder) queryScene(query string, args []interface{}) (*models.Scene, error) { - results, err := qb.queryScenes(query, args) - if err != nil || len(results) < 1 { - return nil, err - } - return results[0], nil -} - -func (qb *sceneQueryBuilder) queryScenes(query string, args []interface{}) ([]*models.Scene, error) { - var ret models.Scenes - if err := qb.query(query, args, &ret); err != nil { - return nil, err - } - - return []*models.Scene(ret), nil -} - -func (qb *sceneQueryBuilder) imageRepository() *imageRepository { +func (qb *SceneStore) imageRepository() *imageRepository { return &imageRepository{ repository: repository{ tx: qb.tx, @@ -844,19 +1395,19 @@ func (qb *sceneQueryBuilder) imageRepository() *imageRepository { } } -func (qb *sceneQueryBuilder) GetCover(sceneID int) ([]byte, error) { - return qb.imageRepository().get(sceneID) +func (qb *SceneStore) GetCover(ctx context.Context, sceneID int) ([]byte, error) { + return qb.imageRepository().get(ctx, sceneID) } -func (qb *sceneQueryBuilder) UpdateCover(sceneID int, image []byte) error { - return qb.imageRepository().replace(sceneID, image) +func (qb *SceneStore) UpdateCover(ctx context.Context, sceneID int, image []byte) error { + return qb.imageRepository().replace(ctx, sceneID, image) } -func (qb *sceneQueryBuilder) DestroyCover(sceneID int) error { - return qb.imageRepository().destroy([]int{sceneID}) +func (qb *SceneStore) DestroyCover(ctx context.Context, sceneID int) error { + return qb.imageRepository().destroy(ctx, []int{sceneID}) } -func (qb *sceneQueryBuilder) moviesRepository() *repository { +func (qb *SceneStore) moviesRepository() *repository { return &repository{ tx: qb.tx, tableName: moviesScenesTable, @@ -864,14 +1415,16 @@ func (qb *sceneQueryBuilder) moviesRepository() *repository { } } -func (qb *sceneQueryBuilder) 
GetMovies(id int) (ret []models.MoviesScenes, err error) { - if err := qb.moviesRepository().getAll(id, func(rows *sqlx.Rows) error { - var ms models.MoviesScenes +func (qb *SceneStore) GetMovies(ctx context.Context, id int) (ret []models.MoviesScenes, err error) { + ret = []models.MoviesScenes{} + + if err := qb.moviesRepository().getAll(ctx, id, func(rows *sqlx.Rows) error { + var ms moviesScenesRow if err := rows.StructScan(&ms); err != nil { return err } - ret = append(ret, ms) + ret = append(ret, ms.resolve(id)) return nil }); err != nil { return nil, err @@ -880,24 +1433,22 @@ func (qb *sceneQueryBuilder) GetMovies(id int) (ret []models.MoviesScenes, err e return ret, nil } -func (qb *sceneQueryBuilder) UpdateMovies(sceneID int, movies []models.MoviesScenes) error { - // destroy existing joins - r := qb.moviesRepository() - if err := r.destroy([]int{sceneID}); err != nil { - return err +func (qb *SceneStore) filesRepository() *filesRepository { + return &filesRepository{ + repository: repository{ + tx: qb.tx, + tableName: scenesFilesTable, + idColumn: sceneIDColumn, + }, } - - for _, m := range movies { - m.SceneID = sceneID - if _, err := r.insert(m); err != nil { - return err - } - } - - return nil } -func (qb *sceneQueryBuilder) performersRepository() *joinRepository { +func (qb *SceneStore) AddFileID(ctx context.Context, id int, fileID file.ID) error { + const firstPrimary = false + return scenesFilesTableMgr.insertJoins(ctx, id, firstPrimary, []file.ID{fileID}) +} + +func (qb *SceneStore) performersRepository() *joinRepository { return &joinRepository{ repository: repository{ tx: qb.tx, @@ -908,16 +1459,11 @@ func (qb *sceneQueryBuilder) performersRepository() *joinRepository { } } -func (qb *sceneQueryBuilder) GetPerformerIDs(id int) ([]int, error) { - return qb.performersRepository().getIDs(id) +func (qb *SceneStore) GetPerformerIDs(ctx context.Context, id int) ([]int, error) { + return qb.performersRepository().getIDs(ctx, id) } -func (qb *sceneQueryBuilder) UpdatePerformers(id int, performerIDs []int) error { - // Delete the existing joins and then create new ones - return qb.performersRepository().replace(id, performerIDs) -} - -func (qb *sceneQueryBuilder) tagsRepository() *joinRepository { +func (qb *SceneStore) tagsRepository() *joinRepository { return &joinRepository{ repository: repository{ tx: qb.tx, @@ -928,16 +1474,11 @@ func (qb *sceneQueryBuilder) tagsRepository() *joinRepository { } } -func (qb *sceneQueryBuilder) GetTagIDs(id int) ([]int, error) { - return qb.tagsRepository().getIDs(id) +func (qb *SceneStore) GetTagIDs(ctx context.Context, id int) ([]int, error) { + return qb.tagsRepository().getIDs(ctx, id) } -func (qb *sceneQueryBuilder) UpdateTags(id int, tagIDs []int) error { - // Delete the existing joins and then create new ones - return qb.tagsRepository().replace(id, tagIDs) -} - -func (qb *sceneQueryBuilder) galleriesRepository() *joinRepository { +func (qb *SceneStore) galleriesRepository() *joinRepository { return &joinRepository{ repository: repository{ tx: qb.tx, @@ -948,16 +1489,15 @@ func (qb *sceneQueryBuilder) galleriesRepository() *joinRepository { } } -func (qb *sceneQueryBuilder) GetGalleryIDs(id int) ([]int, error) { - return qb.galleriesRepository().getIDs(id) +func (qb *SceneStore) GetGalleryIDs(ctx context.Context, id int) ([]int, error) { + return qb.galleriesRepository().getIDs(ctx, id) } -func (qb *sceneQueryBuilder) UpdateGalleries(id int, galleryIDs []int) error { - // Delete the existing joins and then create new ones - return 
qb.galleriesRepository().replace(id, galleryIDs) +func (qb *SceneStore) AddGalleryIDs(ctx context.Context, sceneID int, galleryIDs []int) error { + return scenesGalleriesTableMgr.addJoins(ctx, sceneID, galleryIDs) } -func (qb *sceneQueryBuilder) stashIDRepository() *stashIDRepository { +func (qb *SceneStore) stashIDRepository() *stashIDRepository { return &stashIDRepository{ repository{ tx: qb.tx, @@ -967,19 +1507,15 @@ func (qb *sceneQueryBuilder) stashIDRepository() *stashIDRepository { } } -func (qb *sceneQueryBuilder) GetStashIDs(sceneID int) ([]*models.StashID, error) { - return qb.stashIDRepository().get(sceneID) +func (qb *SceneStore) GetStashIDs(ctx context.Context, sceneID int) ([]models.StashID, error) { + return qb.stashIDRepository().get(ctx, sceneID) } -func (qb *sceneQueryBuilder) UpdateStashIDs(sceneID int, stashIDs []models.StashID) error { - return qb.stashIDRepository().replace(sceneID, stashIDs) -} - -func (qb *sceneQueryBuilder) FindDuplicates(distance int) ([][]*models.Scene, error) { +func (qb *SceneStore) FindDuplicates(ctx context.Context, distance int) ([][]*models.Scene, error) { var dupeIds [][]int if distance == 0 { var ids []string - if err := qb.tx.Select(&ids, findExactDuplicateQuery); err != nil { + if err := qb.tx.Select(ctx, &ids, findExactDuplicateQuery); err != nil { return nil, err } @@ -988,15 +1524,18 @@ func (qb *sceneQueryBuilder) FindDuplicates(distance int) ([][]*models.Scene, er var sceneIds []int for _, strId := range strIds { if intId, err := strconv.Atoi(strId); err == nil { - sceneIds = append(sceneIds, intId) + sceneIds = intslice.IntAppendUnique(sceneIds, intId) } } - dupeIds = append(dupeIds, sceneIds) + // filter out + if len(sceneIds) > 1 { + dupeIds = append(dupeIds, sceneIds) + } } } else { var hashes []*utils.Phash - if err := qb.queryFunc(findAllPhashesQuery, nil, false, func(rows *sqlx.Rows) error { + if err := qb.queryFunc(ctx, findAllPhashesQuery, nil, false, func(rows *sqlx.Rows) error { phash := utils.Phash{ Bucket: -1, } @@ -1015,7 +1554,7 @@ func (qb *sceneQueryBuilder) FindDuplicates(distance int) ([][]*models.Scene, er var duplicates [][]*models.Scene for _, sceneIds := range dupeIds { - if scenes, err := qb.FindMany(sceneIds); err == nil { + if scenes, err := qb.FindMany(ctx, sceneIds); err == nil { duplicates = append(duplicates, scenes) } } diff --git a/pkg/sqlite/scene_marker.go b/pkg/sqlite/scene_marker.go index c79c1dc16..669ee9a6d 100644 --- a/pkg/sqlite/scene_marker.go +++ b/pkg/sqlite/scene_marker.go @@ -1,11 +1,11 @@ package sqlite import ( + "context" "database/sql" "errors" "fmt" - "github.com/stashapp/stash/pkg/database" "github.com/stashapp/stash/pkg/models" ) @@ -22,57 +22,54 @@ type sceneMarkerQueryBuilder struct { repository } -func NewSceneMarkerReaderWriter(tx dbi) *sceneMarkerQueryBuilder { - return &sceneMarkerQueryBuilder{ - repository{ - tx: tx, - tableName: sceneMarkerTable, - idColumn: idColumn, - }, - } +var SceneMarkerReaderWriter = &sceneMarkerQueryBuilder{ + repository{ + tableName: sceneMarkerTable, + idColumn: idColumn, + }, } -func (qb *sceneMarkerQueryBuilder) Create(newObject models.SceneMarker) (*models.SceneMarker, error) { +func (qb *sceneMarkerQueryBuilder) Create(ctx context.Context, newObject models.SceneMarker) (*models.SceneMarker, error) { var ret models.SceneMarker - if err := qb.insertObject(newObject, &ret); err != nil { + if err := qb.insertObject(ctx, newObject, &ret); err != nil { return nil, err } return &ret, nil } -func (qb *sceneMarkerQueryBuilder) Update(updatedObject 
models.SceneMarker) (*models.SceneMarker, error) { +func (qb *sceneMarkerQueryBuilder) Update(ctx context.Context, updatedObject models.SceneMarker) (*models.SceneMarker, error) { const partial = false - if err := qb.update(updatedObject.ID, updatedObject, partial); err != nil { + if err := qb.update(ctx, updatedObject.ID, updatedObject, partial); err != nil { return nil, err } var ret models.SceneMarker - if err := qb.get(updatedObject.ID, &ret); err != nil { + if err := qb.getByID(ctx, updatedObject.ID, &ret); err != nil { return nil, err } return &ret, nil } -func (qb *sceneMarkerQueryBuilder) Destroy(id int) error { - return qb.destroyExisting([]int{id}) +func (qb *sceneMarkerQueryBuilder) Destroy(ctx context.Context, id int) error { + return qb.destroyExisting(ctx, []int{id}) } -func (qb *sceneMarkerQueryBuilder) Find(id int) (*models.SceneMarker, error) { +func (qb *sceneMarkerQueryBuilder) Find(ctx context.Context, id int) (*models.SceneMarker, error) { query := "SELECT * FROM scene_markers WHERE id = ? LIMIT 1" args := []interface{}{id} - results, err := qb.querySceneMarkers(query, args) + results, err := qb.querySceneMarkers(ctx, query, args) if err != nil || len(results) < 1 { return nil, err } return results[0], nil } -func (qb *sceneMarkerQueryBuilder) FindMany(ids []int) ([]*models.SceneMarker, error) { +func (qb *sceneMarkerQueryBuilder) FindMany(ctx context.Context, ids []int) ([]*models.SceneMarker, error) { var markers []*models.SceneMarker for _, id := range ids { - marker, err := qb.Find(id) + marker, err := qb.Find(ctx, id) if err != nil { return nil, err } @@ -87,7 +84,7 @@ func (qb *sceneMarkerQueryBuilder) FindMany(ids []int) ([]*models.SceneMarker, e return markers, nil } -func (qb *sceneMarkerQueryBuilder) FindBySceneID(sceneID int) ([]*models.SceneMarker, error) { +func (qb *sceneMarkerQueryBuilder) FindBySceneID(ctx context.Context, sceneID int) ([]*models.SceneMarker, error) { query := ` SELECT scene_markers.* FROM scene_markers WHERE scene_markers.scene_id = ? 
@@ -95,15 +92,15 @@ func (qb *sceneMarkerQueryBuilder) FindBySceneID(sceneID int) ([]*models.SceneMa ORDER BY scene_markers.seconds ASC ` args := []interface{}{sceneID} - return qb.querySceneMarkers(query, args) + return qb.querySceneMarkers(ctx, query, args) } -func (qb *sceneMarkerQueryBuilder) CountByTagID(tagID int) (int, error) { +func (qb *sceneMarkerQueryBuilder) CountByTagID(ctx context.Context, tagID int) (int, error) { args := []interface{}{tagID, tagID} - return qb.runCountQuery(qb.buildCountQuery(countSceneMarkersForTagQuery), args) + return qb.runCountQuery(ctx, qb.buildCountQuery(countSceneMarkersForTagQuery), args) } -func (qb *sceneMarkerQueryBuilder) GetMarkerStrings(q *string, sort *string) ([]*models.MarkerStringsResultType, error) { +func (qb *sceneMarkerQueryBuilder) GetMarkerStrings(ctx context.Context, q *string, sort *string) ([]*models.MarkerStringsResultType, error) { query := "SELECT count(*) as `count`, scene_markers.id as id, scene_markers.title as title FROM scene_markers" if q != nil { query += " WHERE title LIKE '%" + *q + "%'" @@ -115,30 +112,30 @@ func (qb *sceneMarkerQueryBuilder) GetMarkerStrings(q *string, sort *string) ([] query += " ORDER BY title ASC" } var args []interface{} - return qb.queryMarkerStringsResultType(query, args) + return qb.queryMarkerStringsResultType(ctx, query, args) } -func (qb *sceneMarkerQueryBuilder) Wall(q *string) ([]*models.SceneMarker, error) { +func (qb *sceneMarkerQueryBuilder) Wall(ctx context.Context, q *string) ([]*models.SceneMarker, error) { s := "" if q != nil { s = *q } query := "SELECT scene_markers.* FROM scene_markers WHERE scene_markers.title LIKE '%" + s + "%' ORDER BY RANDOM() LIMIT 80" - return qb.querySceneMarkers(query, nil) + return qb.querySceneMarkers(ctx, query, nil) } -func (qb *sceneMarkerQueryBuilder) makeFilter(sceneMarkerFilter *models.SceneMarkerFilterType) *filterBuilder { +func (qb *sceneMarkerQueryBuilder) makeFilter(ctx context.Context, sceneMarkerFilter *models.SceneMarkerFilterType) *filterBuilder { query := &filterBuilder{} - query.handleCriterion(sceneMarkerTagIDCriterionHandler(qb, sceneMarkerFilter.TagID)) - query.handleCriterion(sceneMarkerTagsCriterionHandler(qb, sceneMarkerFilter.Tags)) - query.handleCriterion(sceneMarkerSceneTagsCriterionHandler(qb, sceneMarkerFilter.SceneTags)) - query.handleCriterion(sceneMarkerPerformersCriterionHandler(qb, sceneMarkerFilter.Performers)) + query.handleCriterion(ctx, sceneMarkerTagIDCriterionHandler(qb, sceneMarkerFilter.TagID)) + query.handleCriterion(ctx, sceneMarkerTagsCriterionHandler(qb, sceneMarkerFilter.Tags)) + query.handleCriterion(ctx, sceneMarkerSceneTagsCriterionHandler(qb, sceneMarkerFilter.SceneTags)) + query.handleCriterion(ctx, sceneMarkerPerformersCriterionHandler(qb, sceneMarkerFilter.Performers)) return query } -func (qb *sceneMarkerQueryBuilder) Query(sceneMarkerFilter *models.SceneMarkerFilterType, findFilter *models.FindFilterType) ([]*models.SceneMarker, int, error) { +func (qb *sceneMarkerQueryBuilder) Query(ctx context.Context, sceneMarkerFilter *models.SceneMarkerFilterType, findFilter *models.FindFilterType) ([]*models.SceneMarker, int, error) { if sceneMarkerFilter == nil { sceneMarkerFilter = &models.SceneMarkerFilterType{} } @@ -154,19 +151,19 @@ func (qb *sceneMarkerQueryBuilder) Query(sceneMarkerFilter *models.SceneMarkerFi query.parseQueryString(searchColumns, *q) } - filter := qb.makeFilter(sceneMarkerFilter) + filter := qb.makeFilter(ctx, sceneMarkerFilter) query.addFilter(filter) query.sortAndPagination = 
qb.getSceneMarkerSort(&query, findFilter) + getPagination(findFilter) - idsResult, countResult, err := query.executeFind() + idsResult, countResult, err := query.executeFind(ctx) if err != nil { return nil, 0, err } var sceneMarkers []*models.SceneMarker for _, id := range idsResult { - sceneMarker, err := qb.Find(id) + sceneMarker, err := qb.Find(ctx, id) if err != nil { return nil, 0, err } @@ -178,7 +175,7 @@ func (qb *sceneMarkerQueryBuilder) Query(sceneMarkerFilter *models.SceneMarkerFi } func sceneMarkerTagIDCriterionHandler(qb *sceneMarkerQueryBuilder, tagID *string) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if tagID != nil { f.addLeftJoin("scene_markers_tags", "", "scene_markers_tags.scene_marker_id = scene_markers.id") @@ -188,7 +185,7 @@ func sceneMarkerTagIDCriterionHandler(qb *sceneMarkerQueryBuilder, tagID *string } func sceneMarkerTagsCriterionHandler(qb *sceneMarkerQueryBuilder, tags *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if tags != nil { if tags.Modifier == models.CriterionModifierIsNull || tags.Modifier == models.CriterionModifierNotNull { var notClause string @@ -205,7 +202,7 @@ func sceneMarkerTagsCriterionHandler(qb *sceneMarkerQueryBuilder, tags *models.H if len(tags.Value) == 0 { return } - valuesClause := getHierarchicalValues(qb.tx, tags.Value, tagTable, "tags_relations", "", tags.Depth) + valuesClause := getHierarchicalValues(ctx, qb.tx, tags.Value, tagTable, "tags_relations", "", tags.Depth) f.addWith(`marker_tags AS ( SELECT mt.scene_marker_id, t.column1 AS root_tag_id FROM scene_markers_tags mt @@ -223,7 +220,7 @@ INNER JOIN (` + valuesClause + `) t ON t.column2 = m.primary_tag_id } func sceneMarkerSceneTagsCriterionHandler(qb *sceneMarkerQueryBuilder, tags *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if tags != nil { if tags.Modifier == models.CriterionModifierIsNull || tags.Modifier == models.CriterionModifierNotNull { var notClause string @@ -241,7 +238,7 @@ func sceneMarkerSceneTagsCriterionHandler(qb *sceneMarkerQueryBuilder, tags *mod return } - valuesClause := getHierarchicalValues(qb.tx, tags.Value, tagTable, "tags_relations", "", tags.Depth) + valuesClause := getHierarchicalValues(ctx, qb.tx, tags.Value, tagTable, "tags_relations", "", tags.Depth) f.addWith(`scene_tags AS ( SELECT st.scene_id, t.column1 AS root_tag_id FROM scenes_tags st @@ -269,10 +266,10 @@ func sceneMarkerPerformersCriterionHandler(qb *sceneMarkerQueryBuilder, performe } handler := h.handler(performers) - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { // Make sure scenes is included, otherwise excludes filter fails f.addLeftJoin(sceneTable, "", "scenes.id = scene_markers.scene_id") - handler(f) + handler(ctx, f) } } @@ -286,20 +283,22 @@ func (qb *sceneMarkerQueryBuilder) getSceneMarkerSort(query *queryBuilder, findF sort = "updated_at" tableName = "scenes" } - return getSort(sort, direction, tableName) + + additional := ", scene_markers.scene_id ASC, scene_markers.seconds ASC" + return getSort(sort, direction, tableName) + additional } -func (qb *sceneMarkerQueryBuilder) querySceneMarkers(query string, args []interface{}) ([]*models.SceneMarker, error) { +func (qb *sceneMarkerQueryBuilder) querySceneMarkers(ctx context.Context, query string, args 
[]interface{}) ([]*models.SceneMarker, error) { var ret models.SceneMarkers - if err := qb.query(query, args, &ret); err != nil { + if err := qb.query(ctx, query, args, &ret); err != nil { return nil, err } return []*models.SceneMarker(ret), nil } -func (qb *sceneMarkerQueryBuilder) queryMarkerStringsResultType(query string, args []interface{}) ([]*models.MarkerStringsResultType, error) { - rows, err := database.DB.Queryx(query, args...) +func (qb *sceneMarkerQueryBuilder) queryMarkerStringsResultType(ctx context.Context, query string, args []interface{}) ([]*models.MarkerStringsResultType, error) { + rows, err := qb.tx.Queryx(ctx, query, args...) if err != nil && !errors.Is(err, sql.ErrNoRows) { return nil, err } @@ -332,11 +331,11 @@ func (qb *sceneMarkerQueryBuilder) tagsRepository() *joinRepository { } } -func (qb *sceneMarkerQueryBuilder) GetTagIDs(id int) ([]int, error) { - return qb.tagsRepository().getIDs(id) +func (qb *sceneMarkerQueryBuilder) GetTagIDs(ctx context.Context, id int) ([]int, error) { + return qb.tagsRepository().getIDs(ctx, id) } -func (qb *sceneMarkerQueryBuilder) UpdateTags(id int, tagIDs []int) error { +func (qb *sceneMarkerQueryBuilder) UpdateTags(ctx context.Context, id int, tagIDs []int) error { // Delete the existing joins and then create new ones - return qb.tagsRepository().replace(id, tagIDs) + return qb.tagsRepository().replace(ctx, id, tagIDs) } diff --git a/pkg/sqlite/scene_marker_test.go b/pkg/sqlite/scene_marker_test.go index 2fa0d7501..76a4dd845 100644 --- a/pkg/sqlite/scene_marker_test.go +++ b/pkg/sqlite/scene_marker_test.go @@ -4,18 +4,20 @@ package sqlite_test import ( + "context" "testing" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sqlite" "github.com/stretchr/testify/assert" ) func TestMarkerFindBySceneID(t *testing.T) { - withTxn(func(r models.Repository) error { - mqb := r.SceneMarker() + withTxn(func(ctx context.Context) error { + mqb := sqlite.SceneMarkerReaderWriter sceneID := sceneIDs[sceneIdxWithMarkers] - markers, err := mqb.FindBySceneID(sceneID) + markers, err := mqb.FindBySceneID(ctx, sceneID) if err != nil { t.Errorf("Error finding markers: %s", err.Error()) @@ -26,7 +28,7 @@ func TestMarkerFindBySceneID(t *testing.T) { assert.Equal(t, sceneIDs[sceneIdxWithMarkers], int(marker.SceneID.Int64)) } - markers, err = mqb.FindBySceneID(0) + markers, err = mqb.FindBySceneID(ctx, 0) if err != nil { t.Errorf("Error finding marker: %s", err.Error()) @@ -39,10 +41,10 @@ func TestMarkerFindBySceneID(t *testing.T) { } func TestMarkerCountByTagID(t *testing.T) { - withTxn(func(r models.Repository) error { - mqb := r.SceneMarker() + withTxn(func(ctx context.Context) error { + mqb := sqlite.SceneMarkerReaderWriter - markerCount, err := mqb.CountByTagID(tagIDs[tagIdxWithPrimaryMarkers]) + markerCount, err := mqb.CountByTagID(ctx, tagIDs[tagIdxWithPrimaryMarkers]) if err != nil { t.Errorf("error calling CountByTagID: %s", err.Error()) @@ -50,7 +52,7 @@ func TestMarkerCountByTagID(t *testing.T) { assert.Equal(t, 3, markerCount) - markerCount, err = mqb.CountByTagID(tagIDs[tagIdxWithMarkers]) + markerCount, err = mqb.CountByTagID(ctx, tagIDs[tagIdxWithMarkers]) if err != nil { t.Errorf("error calling CountByTagID: %s", err.Error()) @@ -58,7 +60,7 @@ func TestMarkerCountByTagID(t *testing.T) { assert.Equal(t, 1, markerCount) - markerCount, err = mqb.CountByTagID(0) + markerCount, err = mqb.CountByTagID(ctx, 0) if err != nil { t.Errorf("error calling CountByTagID: %s", err.Error()) @@ -71,9 +73,9 @@ func 
TestMarkerCountByTagID(t *testing.T) { } func TestMarkerQuerySortBySceneUpdated(t *testing.T) { - withTxn(func(r models.Repository) error { + withTxn(func(ctx context.Context) error { sort := "scenes_updated_at" - _, _, err := r.SceneMarker().Query(nil, &models.FindFilterType{ + _, _, err := sqlite.SceneMarkerReaderWriter.Query(ctx, nil, &models.FindFilterType{ Sort: &sort, }) @@ -92,9 +94,9 @@ func TestMarkerQueryTags(t *testing.T) { findFilter *models.FindFilterType } - withTxn(func(r models.Repository) error { + withTxn(func(ctx context.Context) error { testTags := func(m *models.SceneMarker, markerFilter *models.SceneMarkerFilterType) { - tagIDs, err := r.SceneMarker().GetTagIDs(m.ID) + tagIDs, err := sqlite.SceneMarkerReaderWriter.GetTagIDs(ctx, m.ID) if err != nil { t.Errorf("error getting marker tag ids: %v", err) } @@ -129,7 +131,7 @@ func TestMarkerQueryTags(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - markers := queryMarkers(t, r.SceneMarker(), tc.markerFilter, tc.findFilter) + markers := queryMarkers(ctx, t, sqlite.SceneMarkerReaderWriter, tc.markerFilter, tc.findFilter) assert.Greater(t, len(markers), 0) for _, m := range markers { testTags(m, tc.markerFilter) @@ -148,12 +150,20 @@ func TestMarkerQuerySceneTags(t *testing.T) { findFilter *models.FindFilterType } - withTxn(func(r models.Repository) error { + withTxn(func(ctx context.Context) error { testTags := func(m *models.SceneMarker, markerFilter *models.SceneMarkerFilterType) { - tagIDs, err := r.Scene().GetTagIDs(int(m.SceneID.Int64)) + s, err := db.Scene.Find(ctx, int(m.SceneID.Int64)) if err != nil { t.Errorf("error finding marker scene: %v", err) + return } + + if err := s.LoadTagIDs(ctx, db.Scene); err != nil { + t.Errorf("error loading scene tag ids: %v", err) + return + } + + tagIDs := s.TagIDs.List() if markerFilter.SceneTags.Modifier == models.CriterionModifierIsNull && len(tagIDs) > 0 { t.Errorf("expected marker %d to have no scene tags - found %d", m.ID, len(tagIDs)) } @@ -185,7 +195,7 @@ func TestMarkerQuerySceneTags(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - markers := queryMarkers(t, r.SceneMarker(), tc.markerFilter, tc.findFilter) + markers := queryMarkers(ctx, t, sqlite.SceneMarkerReaderWriter, tc.markerFilter, tc.findFilter) assert.Greater(t, len(markers), 0) for _, m := range markers { testTags(m, tc.markerFilter) @@ -197,9 +207,9 @@ func TestMarkerQuerySceneTags(t *testing.T) { }) } -func queryMarkers(t *testing.T, sqb models.SceneMarkerReader, markerFilter *models.SceneMarkerFilterType, findFilter *models.FindFilterType) []*models.SceneMarker { +func queryMarkers(ctx context.Context, t *testing.T, sqb models.SceneMarkerReader, markerFilter *models.SceneMarkerFilterType, findFilter *models.FindFilterType) []*models.SceneMarker { t.Helper() - result, _, err := sqb.Query(markerFilter, findFilter) + result, _, err := sqb.Query(ctx, markerFilter, findFilter) if err != nil { t.Errorf("Error querying markers: %v", err) } diff --git a/pkg/sqlite/scene_test.go b/pkg/sqlite/scene_test.go index dc70d7637..80fd901d6 100644 --- a/pkg/sqlite/scene_test.go +++ b/pkg/sqlite/scene_test.go @@ -4,79 +4,1742 @@ package sqlite_test import ( + "context" "database/sql" "fmt" "math" + "path/filepath" + "reflect" "regexp" "strconv" "testing" + "time" - "github.com/stretchr/testify/assert" - - "github.com/stashapp/stash/pkg/hash/md5" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/models" +
"github.com/stashapp/stash/pkg/sliceutil/intslice" + "github.com/stretchr/testify/assert" ) -func TestSceneFind(t *testing.T) { - withTxn(func(r models.Repository) error { - // assume that the first scene is sceneWithGalleryPath - sqb := r.Scene() - - const sceneIdx = 0 - sceneID := sceneIDs[sceneIdx] - scene, err := sqb.Find(sceneID) - - if err != nil { - t.Errorf("Error finding scene: %s", err.Error()) +func loadSceneRelationships(ctx context.Context, expected models.Scene, actual *models.Scene) error { + if expected.GalleryIDs.Loaded() { + if err := actual.LoadGalleryIDs(ctx, db.Scene); err != nil { + return err } - - assert.Equal(t, getSceneStringValue(sceneIdx, "Path"), scene.Path) - - sceneID = 0 - scene, err = sqb.Find(sceneID) - - if err != nil { - t.Errorf("Error finding scene: %s", err.Error()) + } + if expected.TagIDs.Loaded() { + if err := actual.LoadTagIDs(ctx, db.Scene); err != nil { + return err } + } + if expected.PerformerIDs.Loaded() { + if err := actual.LoadPerformerIDs(ctx, db.Scene); err != nil { + return err + } + } + if expected.Movies.Loaded() { + if err := actual.LoadMovies(ctx, db.Scene); err != nil { + return err + } + } + if expected.StashIDs.Loaded() { + if err := actual.LoadStashIDs(ctx, db.Scene); err != nil { + return err + } + } + if expected.Files.Loaded() { + if err := actual.LoadFiles(ctx, db.Scene); err != nil { + return err + } + } - assert.Nil(t, scene) + // clear Path, Checksum, PrimaryFileID + if expected.Path == "" { + actual.Path = "" + } + if expected.Checksum == "" { + actual.Checksum = "" + } + if expected.OSHash == "" { + actual.OSHash = "" + } + if expected.PrimaryFileID == nil { + actual.PrimaryFileID = nil + } - return nil - }) + return nil } -func TestSceneFindByPath(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Scene() +func Test_sceneQueryBuilder_Create(t *testing.T) { + var ( + title = "title" + details = "details" + url = "url" + rating = 3 + ocounter = 5 + createdAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + updatedAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + sceneIndex = 123 + sceneIndex2 = 234 + endpoint1 = "endpoint1" + endpoint2 = "endpoint2" + stashID1 = "stashid1" + stashID2 = "stashid2" - const sceneIdx = 1 - scenePath := getSceneStringValue(sceneIdx, "Path") - scene, err := sqb.FindByPath(scenePath) + date = models.NewDate("2003-02-01") - if err != nil { - t.Errorf("Error finding scene: %s", err.Error()) + videoFile = makeFileWithID(fileIdxStartVideoFiles) + ) + + tests := []struct { + name string + newObject models.Scene + wantErr bool + }{ + { + "full", + models.Scene{ + Title: title, + Details: details, + URL: url, + Date: &date, + Rating: &rating, + Organized: true, + OCounter: ocounter, + StudioID: &studioIDs[studioIdxWithScene], + CreatedAt: createdAt, + UpdatedAt: updatedAt, + GalleryIDs: models.NewRelatedIDs([]int{galleryIDs[galleryIdxWithScene]}), + TagIDs: models.NewRelatedIDs([]int{tagIDs[tagIdx1WithScene], tagIDs[tagIdx1WithDupName]}), + PerformerIDs: models.NewRelatedIDs([]int{performerIDs[performerIdx1WithScene], performerIDs[performerIdx1WithDupName]}), + Movies: models.NewRelatedMovies([]models.MoviesScenes{ + { + MovieID: movieIDs[movieIdxWithScene], + SceneIndex: &sceneIndex, + }, + { + MovieID: movieIDs[movieIdxWithStudio], + SceneIndex: &sceneIndex2, + }, + }), + StashIDs: models.NewRelatedStashIDs([]models.StashID{ + { + StashID: stashID1, + Endpoint: endpoint1, + }, + { + StashID: stashID2, + Endpoint: endpoint2, + }, + }), + }, + false, + }, + { + "with file", + 
models.Scene{ + Title: title, + Details: details, + URL: url, + Date: &date, + Rating: &rating, + Organized: true, + OCounter: ocounter, + StudioID: &studioIDs[studioIdxWithScene], + Files: models.NewRelatedVideoFiles([]*file.VideoFile{ + videoFile.(*file.VideoFile), + }), + CreatedAt: createdAt, + UpdatedAt: updatedAt, + GalleryIDs: models.NewRelatedIDs([]int{galleryIDs[galleryIdxWithScene]}), + TagIDs: models.NewRelatedIDs([]int{tagIDs[tagIdx1WithScene], tagIDs[tagIdx1WithDupName]}), + PerformerIDs: models.NewRelatedIDs([]int{performerIDs[performerIdx1WithScene], performerIDs[performerIdx1WithDupName]}), + Movies: models.NewRelatedMovies([]models.MoviesScenes{ + { + MovieID: movieIDs[movieIdxWithScene], + SceneIndex: &sceneIndex, + }, + { + MovieID: movieIDs[movieIdxWithStudio], + SceneIndex: &sceneIndex2, + }, + }), + StashIDs: models.NewRelatedStashIDs([]models.StashID{ + { + StashID: stashID1, + Endpoint: endpoint1, + }, + { + StashID: stashID2, + Endpoint: endpoint2, + }, + }), + }, + false, + }, + { + "invalid studio id", + models.Scene{ + StudioID: &invalidID, + }, + true, + }, + { + "invalid gallery id", + models.Scene{ + GalleryIDs: models.NewRelatedIDs([]int{invalidID}), + }, + true, + }, + { + "invalid tag id", + models.Scene{ + TagIDs: models.NewRelatedIDs([]int{invalidID}), + }, + true, + }, + { + "invalid performer id", + models.Scene{ + PerformerIDs: models.NewRelatedIDs([]int{invalidID}), + }, + true, + }, + { + "invalid movie id", + models.Scene{ + Movies: models.NewRelatedMovies([]models.MoviesScenes{ + { + MovieID: invalidID, + SceneIndex: &sceneIndex, + }, + }), + }, + true, + }, + } + + qb := db.Scene + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + + var fileIDs []file.ID + if tt.newObject.Files.Loaded() { + for _, f := range tt.newObject.Files.List() { + fileIDs = append(fileIDs, f.ID) + } + } + + s := tt.newObject + if err := qb.Create(ctx, &s, fileIDs); (err != nil) != tt.wantErr { + t.Errorf("sceneQueryBuilder.Create() error = %v, wantErr = %v", err, tt.wantErr) + } + + if tt.wantErr { + assert.Zero(s.ID) + return + } + + assert.NotZero(s.ID) + + copy := tt.newObject + copy.ID = s.ID + + // load relationships + if err := loadSceneRelationships(ctx, copy, &s); err != nil { + t.Errorf("loadSceneRelationships() error = %v", err) + return + } + + assert.Equal(copy, s) + + // ensure can find the scene + found, err := qb.Find(ctx, s.ID) + if err != nil { + t.Errorf("sceneQueryBuilder.Find() error = %v", err) + } + + if !assert.NotNil(found) { + return + } + + // load relationships + if err := loadSceneRelationships(ctx, copy, found); err != nil { + t.Errorf("loadSceneRelationships() error = %v", err) + return + } + assert.Equal(copy, *found) + + return + }) + } +} + +func clearSceneFileIDs(scene *models.Scene) { + if scene.Files.Loaded() { + for _, f := range scene.Files.List() { + f.Base().ID = 0 + } + } +} + +func makeSceneFileWithID(i int) *file.VideoFile { + ret := makeSceneFile(i) + ret.ID = sceneFileIDs[i] + return ret +} + +func Test_sceneQueryBuilder_Update(t *testing.T) { + var ( + title = "title" + details = "details" + url = "url" + rating = 3 + ocounter = 5 + createdAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + updatedAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + sceneIndex = 123 + sceneIndex2 = 234 + endpoint1 = "endpoint1" + endpoint2 = "endpoint2" + stashID1 = "stashid1" + stashID2 = "stashid2" + + date = models.NewDate("2003-02-01") + ) + + tests := []struct { 
+ name string + updatedObject *models.Scene + wantErr bool + }{ + { + "full", + &models.Scene{ + ID: sceneIDs[sceneIdxWithGallery], + Title: title, + Details: details, + URL: url, + Date: &date, + Rating: &rating, + Organized: true, + OCounter: ocounter, + StudioID: &studioIDs[studioIdxWithScene], + CreatedAt: createdAt, + UpdatedAt: updatedAt, + GalleryIDs: models.NewRelatedIDs([]int{galleryIDs[galleryIdxWithScene]}), + TagIDs: models.NewRelatedIDs([]int{tagIDs[tagIdx1WithScene], tagIDs[tagIdx1WithDupName]}), + PerformerIDs: models.NewRelatedIDs([]int{performerIDs[performerIdx1WithScene], performerIDs[performerIdx1WithDupName]}), + Movies: models.NewRelatedMovies([]models.MoviesScenes{ + { + MovieID: movieIDs[movieIdxWithScene], + SceneIndex: &sceneIndex, + }, + { + MovieID: movieIDs[movieIdxWithStudio], + SceneIndex: &sceneIndex2, + }, + }), + StashIDs: models.NewRelatedStashIDs([]models.StashID{ + { + StashID: stashID1, + Endpoint: endpoint1, + }, + { + StashID: stashID2, + Endpoint: endpoint2, + }, + }), + }, + false, + }, + { + "clear nullables", + &models.Scene{ + ID: sceneIDs[sceneIdxWithSpacedName], + GalleryIDs: models.NewRelatedIDs([]int{}), + TagIDs: models.NewRelatedIDs([]int{}), + PerformerIDs: models.NewRelatedIDs([]int{}), + Movies: models.NewRelatedMovies([]models.MoviesScenes{}), + StashIDs: models.NewRelatedStashIDs([]models.StashID{}), + }, + false, + }, + { + "clear gallery ids", + &models.Scene{ + ID: sceneIDs[sceneIdxWithGallery], + GalleryIDs: models.NewRelatedIDs([]int{}), + }, + false, + }, + { + "clear tag ids", + &models.Scene{ + ID: sceneIDs[sceneIdxWithTag], + TagIDs: models.NewRelatedIDs([]int{}), + }, + false, + }, + { + "clear performer ids", + &models.Scene{ + ID: sceneIDs[sceneIdxWithPerformer], + PerformerIDs: models.NewRelatedIDs([]int{}), + }, + false, + }, + { + "clear movies", + &models.Scene{ + ID: sceneIDs[sceneIdxWithMovie], + Movies: models.NewRelatedMovies([]models.MoviesScenes{}), + }, + false, + }, + { + "invalid studio id", + &models.Scene{ + ID: sceneIDs[sceneIdxWithGallery], + StudioID: &invalidID, + }, + true, + }, + { + "invalid gallery id", + &models.Scene{ + ID: sceneIDs[sceneIdxWithGallery], + GalleryIDs: models.NewRelatedIDs([]int{invalidID}), + }, + true, + }, + { + "invalid tag id", + &models.Scene{ + ID: sceneIDs[sceneIdxWithGallery], + TagIDs: models.NewRelatedIDs([]int{invalidID}), + }, + true, + }, + { + "invalid performer id", + &models.Scene{ + ID: sceneIDs[sceneIdxWithGallery], + PerformerIDs: models.NewRelatedIDs([]int{invalidID}), + }, + true, + }, + { + "invalid movie id", + &models.Scene{ + ID: sceneIDs[sceneIdxWithSpacedName], + Movies: models.NewRelatedMovies([]models.MoviesScenes{ + { + MovieID: invalidID, + SceneIndex: &sceneIndex, + }, + }), + }, + true, + }, + } + + qb := db.Scene + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + + copy := *tt.updatedObject + + if err := qb.Update(ctx, tt.updatedObject); (err != nil) != tt.wantErr { + t.Errorf("sceneQueryBuilder.Update() error = %v, wantErr %v", err, tt.wantErr) + } + + if tt.wantErr { + return + } + + s, err := qb.Find(ctx, tt.updatedObject.ID) + if err != nil { + t.Errorf("sceneQueryBuilder.Find() error = %v", err) + } + + // load relationships + if err := loadSceneRelationships(ctx, copy, s); err != nil { + t.Errorf("loadSceneRelationships() error = %v", err) + return + } + + assert.Equal(copy, *s) + }) + } +} + +func clearScenePartial() models.ScenePartial { + // leave mandatory 
fields + return models.ScenePartial{ + Title: models.OptionalString{Set: true, Null: true}, + Details: models.OptionalString{Set: true, Null: true}, + URL: models.OptionalString{Set: true, Null: true}, + Date: models.OptionalDate{Set: true, Null: true}, + Rating: models.OptionalInt{Set: true, Null: true}, + StudioID: models.OptionalInt{Set: true, Null: true}, + GalleryIDs: &models.UpdateIDs{Mode: models.RelationshipUpdateModeSet}, + TagIDs: &models.UpdateIDs{Mode: models.RelationshipUpdateModeSet}, + PerformerIDs: &models.UpdateIDs{Mode: models.RelationshipUpdateModeSet}, + StashIDs: &models.UpdateStashIDs{Mode: models.RelationshipUpdateModeSet}, + } +} + +func Test_sceneQueryBuilder_UpdatePartial(t *testing.T) { + var ( + title = "title" + details = "details" + url = "url" + rating = 3 + ocounter = 5 + createdAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + updatedAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) + sceneIndex = 123 + sceneIndex2 = 234 + endpoint1 = "endpoint1" + endpoint2 = "endpoint2" + stashID1 = "stashid1" + stashID2 = "stashid2" + + date = models.NewDate("2003-02-01") + ) + + tests := []struct { + name string + id int + partial models.ScenePartial + want models.Scene + wantErr bool + }{ + { + "full", + sceneIDs[sceneIdxWithSpacedName], + models.ScenePartial{ + Title: models.NewOptionalString(title), + Details: models.NewOptionalString(details), + URL: models.NewOptionalString(url), + Date: models.NewOptionalDate(date), + Rating: models.NewOptionalInt(rating), + Organized: models.NewOptionalBool(true), + OCounter: models.NewOptionalInt(ocounter), + StudioID: models.NewOptionalInt(studioIDs[studioIdxWithScene]), + CreatedAt: models.NewOptionalTime(createdAt), + UpdatedAt: models.NewOptionalTime(updatedAt), + GalleryIDs: &models.UpdateIDs{ + IDs: []int{galleryIDs[galleryIdxWithScene]}, + Mode: models.RelationshipUpdateModeSet, + }, + TagIDs: &models.UpdateIDs{ + IDs: []int{tagIDs[tagIdx1WithScene], tagIDs[tagIdx1WithDupName]}, + Mode: models.RelationshipUpdateModeSet, + }, + PerformerIDs: &models.UpdateIDs{ + IDs: []int{performerIDs[performerIdx1WithScene], performerIDs[performerIdx1WithDupName]}, + Mode: models.RelationshipUpdateModeSet, + }, + MovieIDs: &models.UpdateMovieIDs{ + Movies: []models.MoviesScenes{ + { + MovieID: movieIDs[movieIdxWithScene], + SceneIndex: &sceneIndex, + }, + { + MovieID: movieIDs[movieIdxWithStudio], + SceneIndex: &sceneIndex2, + }, + }, + Mode: models.RelationshipUpdateModeSet, + }, + StashIDs: &models.UpdateStashIDs{ + StashIDs: []models.StashID{ + { + StashID: stashID1, + Endpoint: endpoint1, + }, + { + StashID: stashID2, + Endpoint: endpoint2, + }, + }, + Mode: models.RelationshipUpdateModeSet, + }, + }, + models.Scene{ + ID: sceneIDs[sceneIdxWithSpacedName], + Files: models.NewRelatedVideoFiles([]*file.VideoFile{ + makeSceneFile(sceneIdxWithSpacedName), + }), + Title: title, + Details: details, + URL: url, + Date: &date, + Rating: &rating, + Organized: true, + OCounter: ocounter, + StudioID: &studioIDs[studioIdxWithScene], + CreatedAt: createdAt, + UpdatedAt: updatedAt, + GalleryIDs: models.NewRelatedIDs([]int{galleryIDs[galleryIdxWithScene]}), + TagIDs: models.NewRelatedIDs([]int{tagIDs[tagIdx1WithScene], tagIDs[tagIdx1WithDupName]}), + PerformerIDs: models.NewRelatedIDs([]int{performerIDs[performerIdx1WithScene], performerIDs[performerIdx1WithDupName]}), + Movies: models.NewRelatedMovies([]models.MoviesScenes{ + { + MovieID: movieIDs[movieIdxWithScene], + SceneIndex: &sceneIndex, + }, + { + MovieID: movieIDs[movieIdxWithStudio], + 
SceneIndex: &sceneIndex2, + }, + }), + StashIDs: models.NewRelatedStashIDs([]models.StashID{ + { + StashID: stashID1, + Endpoint: endpoint1, + }, + { + StashID: stashID2, + Endpoint: endpoint2, + }, + }), + }, + false, + }, + { + "clear all", + sceneIDs[sceneIdxWithSpacedName], + clearScenePartial(), + models.Scene{ + ID: sceneIDs[sceneIdxWithSpacedName], + Files: models.NewRelatedVideoFiles([]*file.VideoFile{ + makeSceneFile(sceneIdxWithSpacedName), + }), + GalleryIDs: models.NewRelatedIDs([]int{}), + TagIDs: models.NewRelatedIDs([]int{}), + PerformerIDs: models.NewRelatedIDs([]int{}), + Movies: models.NewRelatedMovies([]models.MoviesScenes{}), + StashIDs: models.NewRelatedStashIDs([]models.StashID{}), + }, + false, + }, + { + "invalid id", + invalidID, + models.ScenePartial{}, + models.Scene{}, + true, + }, + } + for _, tt := range tests { + qb := db.Scene + + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + + got, err := qb.UpdatePartial(ctx, tt.id, tt.partial) + if (err != nil) != tt.wantErr { + t.Errorf("sceneQueryBuilder.UpdatePartial() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if tt.wantErr { + return + } + + // load relationships + if err := loadSceneRelationships(ctx, tt.want, got); err != nil { + t.Errorf("loadSceneRelationships() error = %v", err) + return + } + + // ignore file ids + clearSceneFileIDs(got) + + assert.Equal(tt.want, *got) + + s, err := qb.Find(ctx, tt.id) + if err != nil { + t.Errorf("sceneQueryBuilder.Find() error = %v", err) + } + + // load relationships + if err := loadSceneRelationships(ctx, tt.want, s); err != nil { + t.Errorf("loadSceneRelationships() error = %v", err) + return + } + // ignore file ids + clearSceneFileIDs(s) + + assert.Equal(tt.want, *s) + }) + } +} + +func Test_sceneQueryBuilder_UpdatePartialRelationships(t *testing.T) { + var ( + sceneIndex = 123 + sceneIndex2 = 234 + endpoint1 = "endpoint1" + endpoint2 = "endpoint2" + stashID1 = "stashid1" + stashID2 = "stashid2" + + movieScenes = []models.MoviesScenes{ + { + MovieID: movieIDs[movieIdxWithDupName], + SceneIndex: &sceneIndex, + }, + { + MovieID: movieIDs[movieIdxWithStudio], + SceneIndex: &sceneIndex2, + }, } - assert.Equal(t, sceneIDs[sceneIdx], scene.ID) - assert.Equal(t, scenePath, scene.Path) - - scenePath = "not exist" - scene, err = sqb.FindByPath(scenePath) - - if err != nil { - t.Errorf("Error finding scene: %s", err.Error()) + stashIDs = []models.StashID{ + { + StashID: stashID1, + Endpoint: endpoint1, + }, + { + StashID: stashID2, + Endpoint: endpoint2, + }, } + ) - assert.Nil(t, scene) + tests := []struct { + name string + id int + partial models.ScenePartial + want models.Scene + wantErr bool + }{ + { + "add galleries", + sceneIDs[sceneIdxWithGallery], + models.ScenePartial{ + GalleryIDs: &models.UpdateIDs{ + IDs: []int{galleryIDs[galleryIdx1WithImage], galleryIDs[galleryIdx1WithPerformer]}, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Scene{ + GalleryIDs: models.NewRelatedIDs(append(indexesToIDs(galleryIDs, sceneGalleries[sceneIdxWithGallery]), + galleryIDs[galleryIdx1WithImage], + galleryIDs[galleryIdx1WithPerformer], + )), + }, + false, + }, + { + "add tags", + sceneIDs[sceneIdxWithTwoTags], + models.ScenePartial{ + TagIDs: &models.UpdateIDs{ + IDs: []int{tagIDs[tagIdx1WithDupName], tagIDs[tagIdx1WithGallery]}, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Scene{ + TagIDs: models.NewRelatedIDs(append(indexesToIDs(tagIDs, sceneTags[sceneIdxWithTwoTags]), + 
tagIDs[tagIdx1WithDupName], + tagIDs[tagIdx1WithGallery], + )), + }, + false, + }, + { + "add performers", + sceneIDs[sceneIdxWithTwoPerformers], + models.ScenePartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: []int{performerIDs[performerIdx1WithDupName], performerIDs[performerIdx1WithGallery]}, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Scene{ + PerformerIDs: models.NewRelatedIDs(append(indexesToIDs(performerIDs, scenePerformers[sceneIdxWithTwoPerformers]), + performerIDs[performerIdx1WithDupName], + performerIDs[performerIdx1WithGallery], + )), + }, + false, + }, + { + "add movies", + sceneIDs[sceneIdxWithMovie], + models.ScenePartial{ + MovieIDs: &models.UpdateMovieIDs{ + Movies: movieScenes, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Scene{ + Movies: models.NewRelatedMovies(append([]models.MoviesScenes{ + { + MovieID: indexesToIDs(movieIDs, sceneMovies[sceneIdxWithMovie])[0], + }, + }, movieScenes...)), + }, + false, + }, + { + "add stash ids", + sceneIDs[sceneIdxWithSpacedName], + models.ScenePartial{ + StashIDs: &models.UpdateStashIDs{ + StashIDs: stashIDs, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Scene{ + StashIDs: models.NewRelatedStashIDs(append([]models.StashID{sceneStashID(sceneIdxWithSpacedName)}, stashIDs...)), + }, + false, + }, + { + "add duplicate galleries", + sceneIDs[sceneIdxWithGallery], + models.ScenePartial{ + GalleryIDs: &models.UpdateIDs{ + IDs: []int{galleryIDs[galleryIdxWithScene], galleryIDs[galleryIdx1WithPerformer]}, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Scene{ + GalleryIDs: models.NewRelatedIDs(append(indexesToIDs(galleryIDs, sceneGalleries[sceneIdxWithGallery]), + galleryIDs[galleryIdx1WithPerformer], + )), + }, + false, + }, + { + "add duplicate tags", + sceneIDs[sceneIdxWithTwoTags], + models.ScenePartial{ + TagIDs: &models.UpdateIDs{ + IDs: []int{tagIDs[tagIdx1WithScene], tagIDs[tagIdx1WithGallery]}, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Scene{ + TagIDs: models.NewRelatedIDs(append(indexesToIDs(tagIDs, sceneTags[sceneIdxWithTwoTags]), + tagIDs[tagIdx1WithGallery], + )), + }, + false, + }, + { + "add duplicate performers", + sceneIDs[sceneIdxWithTwoPerformers], + models.ScenePartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: []int{performerIDs[performerIdx1WithScene], performerIDs[performerIdx1WithGallery]}, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Scene{ + PerformerIDs: models.NewRelatedIDs(append(indexesToIDs(performerIDs, scenePerformers[sceneIdxWithTwoPerformers]), + performerIDs[performerIdx1WithGallery], + )), + }, + false, + }, + { + "add duplicate movies", + sceneIDs[sceneIdxWithMovie], + models.ScenePartial{ + MovieIDs: &models.UpdateMovieIDs{ + Movies: append([]models.MoviesScenes{ + { + MovieID: movieIDs[movieIdxWithScene], + SceneIndex: &sceneIndex, + }, + }, + movieScenes..., + ), + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Scene{ + Movies: models.NewRelatedMovies(append([]models.MoviesScenes{ + { + MovieID: indexesToIDs(movieIDs, sceneMovies[sceneIdxWithMovie])[0], + }, + }, movieScenes...)), + }, + false, + }, + { + "add duplicate stash ids", + sceneIDs[sceneIdxWithSpacedName], + models.ScenePartial{ + StashIDs: &models.UpdateStashIDs{ + StashIDs: []models.StashID{ + sceneStashID(sceneIdxWithSpacedName), + }, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Scene{ + StashIDs: models.NewRelatedStashIDs([]models.StashID{sceneStashID(sceneIdxWithSpacedName)}), + }, + false, + }, + { + "add 
invalid galleries", + sceneIDs[sceneIdxWithGallery], + models.ScenePartial{ + GalleryIDs: &models.UpdateIDs{ + IDs: []int{invalidID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Scene{}, + true, + }, + { + "add invalid tags", + sceneIDs[sceneIdxWithTwoTags], + models.ScenePartial{ + TagIDs: &models.UpdateIDs{ + IDs: []int{invalidID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Scene{}, + true, + }, + { + "add invalid performers", + sceneIDs[sceneIdxWithTwoPerformers], + models.ScenePartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: []int{invalidID}, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Scene{}, + true, + }, + { + "add invalid movies", + sceneIDs[sceneIdxWithMovie], + models.ScenePartial{ + MovieIDs: &models.UpdateMovieIDs{ + Movies: []models.MoviesScenes{ + { + MovieID: invalidID, + }, + }, + Mode: models.RelationshipUpdateModeAdd, + }, + }, + models.Scene{}, + true, + }, + { + "remove galleries", + sceneIDs[sceneIdxWithGallery], + models.ScenePartial{ + GalleryIDs: &models.UpdateIDs{ + IDs: []int{galleryIDs[galleryIdxWithScene]}, + Mode: models.RelationshipUpdateModeRemove, + }, + }, + models.Scene{ + GalleryIDs: models.NewRelatedIDs([]int{}), + }, + false, + }, + { + "remove tags", + sceneIDs[sceneIdxWithTwoTags], + models.ScenePartial{ + TagIDs: &models.UpdateIDs{ + IDs: []int{tagIDs[tagIdx1WithScene]}, + Mode: models.RelationshipUpdateModeRemove, + }, + }, + models.Scene{ + TagIDs: models.NewRelatedIDs([]int{tagIDs[tagIdx2WithScene]}), + }, + false, + }, + { + "remove performers", + sceneIDs[sceneIdxWithTwoPerformers], + models.ScenePartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: []int{performerIDs[performerIdx1WithScene]}, + Mode: models.RelationshipUpdateModeRemove, + }, + }, + models.Scene{ + PerformerIDs: models.NewRelatedIDs([]int{performerIDs[performerIdx2WithScene]}), + }, + false, + }, + { + "remove movies", + sceneIDs[sceneIdxWithMovie], + models.ScenePartial{ + MovieIDs: &models.UpdateMovieIDs{ + Movies: []models.MoviesScenes{ + { + MovieID: movieIDs[movieIdxWithScene], + }, + }, + Mode: models.RelationshipUpdateModeRemove, + }, + }, + models.Scene{ + Movies: models.NewRelatedMovies([]models.MoviesScenes{}), + }, + false, + }, + { + "remove stash ids", + sceneIDs[sceneIdxWithSpacedName], + models.ScenePartial{ + StashIDs: &models.UpdateStashIDs{ + StashIDs: []models.StashID{sceneStashID(sceneIdxWithSpacedName)}, + Mode: models.RelationshipUpdateModeRemove, + }, + }, + models.Scene{ + StashIDs: models.NewRelatedStashIDs([]models.StashID{}), + }, + false, + }, + { + "remove unrelated galleries", + sceneIDs[sceneIdxWithGallery], + models.ScenePartial{ + GalleryIDs: &models.UpdateIDs{ + IDs: []int{galleryIDs[galleryIdx1WithImage]}, + Mode: models.RelationshipUpdateModeRemove, + }, + }, + models.Scene{ + GalleryIDs: models.NewRelatedIDs([]int{galleryIDs[galleryIdxWithScene]}), + }, + false, + }, + { + "remove unrelated tags", + sceneIDs[sceneIdxWithTwoTags], + models.ScenePartial{ + TagIDs: &models.UpdateIDs{ + IDs: []int{tagIDs[tagIdx1WithPerformer]}, + Mode: models.RelationshipUpdateModeRemove, + }, + }, + models.Scene{ + TagIDs: models.NewRelatedIDs(indexesToIDs(tagIDs, sceneTags[sceneIdxWithTwoTags])), + }, + false, + }, + { + "remove unrelated performers", + sceneIDs[sceneIdxWithTwoPerformers], + models.ScenePartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: []int{performerIDs[performerIdx1WithDupName]}, + Mode: models.RelationshipUpdateModeRemove, + }, + }, + models.Scene{ + PerformerIDs: 
models.NewRelatedIDs(indexesToIDs(performerIDs, scenePerformers[sceneIdxWithTwoPerformers])), + }, + false, + }, + { + "remove unrelated movies", + sceneIDs[sceneIdxWithMovie], + models.ScenePartial{ + MovieIDs: &models.UpdateMovieIDs{ + Movies: []models.MoviesScenes{ + { + MovieID: movieIDs[movieIdxWithDupName], + }, + }, + Mode: models.RelationshipUpdateModeRemove, + }, + }, + models.Scene{ + Movies: models.NewRelatedMovies([]models.MoviesScenes{ + { + MovieID: indexesToIDs(movieIDs, sceneMovies[sceneIdxWithMovie])[0], + }, + }), + }, + false, + }, + { + "remove unrelated stash ids", + sceneIDs[sceneIdxWithGallery], + models.ScenePartial{ + StashIDs: &models.UpdateStashIDs{ + StashIDs: stashIDs, + Mode: models.RelationshipUpdateModeRemove, + }, + }, + models.Scene{ + StashIDs: models.NewRelatedStashIDs([]models.StashID{sceneStashID(sceneIdxWithGallery)}), + }, + false, + }, + } - return nil - }) + for _, tt := range tests { + qb := db.Scene + + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + + got, err := qb.UpdatePartial(ctx, tt.id, tt.partial) + if (err != nil) != tt.wantErr { + t.Errorf("sceneQueryBuilder.UpdatePartial() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if tt.wantErr { + return + } + + s, err := qb.Find(ctx, tt.id) + if err != nil { + t.Errorf("sceneQueryBuilder.Find() error = %v", err) + } + + // load relationships + if err := loadSceneRelationships(ctx, tt.want, got); err != nil { + t.Errorf("loadSceneRelationships() error = %v", err) + return + } + if err := loadSceneRelationships(ctx, tt.want, s); err != nil { + t.Errorf("loadSceneRelationships() error = %v", err) + return + } + + // only compare fields that were in the partial + if tt.partial.PerformerIDs != nil { + assert.Equal(tt.want.PerformerIDs, got.PerformerIDs) + assert.Equal(tt.want.PerformerIDs, s.PerformerIDs) + } + if tt.partial.TagIDs != nil { + assert.Equal(tt.want.TagIDs, got.TagIDs) + assert.Equal(tt.want.TagIDs, s.TagIDs) + } + if tt.partial.GalleryIDs != nil { + assert.Equal(tt.want.GalleryIDs, got.GalleryIDs) + assert.Equal(tt.want.GalleryIDs, s.GalleryIDs) + } + if tt.partial.MovieIDs != nil { + assert.Equal(tt.want.Movies, got.Movies) + assert.Equal(tt.want.Movies, s.Movies) + } + if tt.partial.StashIDs != nil { + assert.Equal(tt.want.StashIDs, got.StashIDs) + assert.Equal(tt.want.StashIDs, s.StashIDs) + } + }) + } +} + +func Test_sceneQueryBuilder_IncrementOCounter(t *testing.T) { + tests := []struct { + name string + id int + want int + wantErr bool + }{ + { + "increment", + sceneIDs[1], + 2, + false, + }, + { + "invalid", + invalidID, + 0, + true, + }, + } + + qb := db.Scene + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + got, err := qb.IncrementOCounter(ctx, tt.id) + if (err != nil) != tt.wantErr { + t.Errorf("sceneQueryBuilder.IncrementOCounter() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("sceneQueryBuilder.IncrementOCounter() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_sceneQueryBuilder_DecrementOCounter(t *testing.T) { + tests := []struct { + name string + id int + want int + wantErr bool + }{ + { + "decrement", + sceneIDs[2], + 1, + false, + }, + { + "zero", + sceneIDs[0], + 0, + false, + }, + { + "invalid", + invalidID, + 0, + true, + }, + } + + qb := db.Scene + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + got, err := qb.DecrementOCounter(ctx, 
tt.id) + if (err != nil) != tt.wantErr { + t.Errorf("sceneQueryBuilder.DecrementOCounter() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("sceneQueryBuilder.DecrementOCounter() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_sceneQueryBuilder_ResetOCounter(t *testing.T) { + tests := []struct { + name string + id int + want int + wantErr bool + }{ + { + "decrement", + sceneIDs[2], + 0, + false, + }, + { + "zero", + sceneIDs[0], + 0, + false, + }, + { + "invalid", + invalidID, + 0, + true, + }, + } + + qb := db.Scene + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + got, err := qb.ResetOCounter(ctx, tt.id) + if (err != nil) != tt.wantErr { + t.Errorf("sceneQueryBuilder.ResetOCounter() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("sceneQueryBuilder.ResetOCounter() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_sceneQueryBuilder_Destroy(t *testing.T) { + tests := []struct { + name string + id int + wantErr bool + }{ + { + "valid", + sceneIDs[sceneIdxWithGallery], + false, + }, + { + "invalid", + invalidID, + true, + }, + } + + qb := db.Scene + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + withRollbackTxn(func(ctx context.Context) error { + if err := qb.Destroy(ctx, tt.id); (err != nil) != tt.wantErr { + t.Errorf("sceneQueryBuilder.Destroy() error = %v, wantErr %v", err, tt.wantErr) + } + + // ensure cannot be found + i, err := qb.Find(ctx, tt.id) + + assert.NotNil(err) + assert.Nil(i) + return nil + }) + }) + } +} + +func makeSceneWithID(index int) *models.Scene { + ret := makeScene(index) + ret.ID = sceneIDs[index] + + if ret.Date != nil && ret.Date.IsZero() { + ret.Date = nil + } + + ret.Files = models.NewRelatedVideoFiles([]*file.VideoFile{makeSceneFile(index)}) + + return ret +} + +func Test_sceneQueryBuilder_Find(t *testing.T) { + tests := []struct { + name string + id int + want *models.Scene + wantErr bool + }{ + { + "valid", + sceneIDs[sceneIdxWithSpacedName], + makeSceneWithID(sceneIdxWithSpacedName), + false, + }, + { + "invalid", + invalidID, + nil, + true, + }, + { + "with galleries", + sceneIDs[sceneIdxWithGallery], + makeSceneWithID(sceneIdxWithGallery), + false, + }, + { + "with performers", + sceneIDs[sceneIdxWithTwoPerformers], + makeSceneWithID(sceneIdxWithTwoPerformers), + false, + }, + { + "with tags", + sceneIDs[sceneIdxWithTwoTags], + makeSceneWithID(sceneIdxWithTwoTags), + false, + }, + { + "with movies", + sceneIDs[sceneIdxWithMovie], + makeSceneWithID(sceneIdxWithMovie), + false, + }, + } + + qb := db.Scene + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + withTxn(func(ctx context.Context) error { + got, err := qb.Find(ctx, tt.id) + if (err != nil) != tt.wantErr { + t.Errorf("sceneQueryBuilder.Find() error = %v, wantErr %v", err, tt.wantErr) + return nil + } + + if got != nil { + // load relationships + if err := loadSceneRelationships(ctx, *tt.want, got); err != nil { + t.Errorf("loadSceneRelationships() error = %v", err) + return nil + } + + clearSceneFileIDs(got) + } + + assert.Equal(tt.want, got) + return nil + }) + }) + } +} + +func postFindScenes(ctx context.Context, want []*models.Scene, got []*models.Scene) error { + for i, s := range got { + // load relationships + if i < len(want) { + if err := loadSceneRelationships(ctx, 
*want[i], s); err != nil { + return err + } + } + clearSceneFileIDs(s) + } + + return nil +} + +func Test_sceneQueryBuilder_FindMany(t *testing.T) { + tests := []struct { + name string + ids []int + want []*models.Scene + wantErr bool + }{ + { + "valid with relationships", + []int{ + sceneIDs[sceneIdxWithGallery], + sceneIDs[sceneIdxWithTwoPerformers], + sceneIDs[sceneIdxWithTwoTags], + sceneIDs[sceneIdxWithMovie], + }, + []*models.Scene{ + makeSceneWithID(sceneIdxWithGallery), + makeSceneWithID(sceneIdxWithTwoPerformers), + makeSceneWithID(sceneIdxWithTwoTags), + makeSceneWithID(sceneIdxWithMovie), + }, + false, + }, + { + "invalid", + []int{sceneIDs[sceneIdxWithGallery], sceneIDs[sceneIdxWithTwoPerformers], invalidID}, + nil, + true, + }, + } + + qb := db.Scene + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + got, err := qb.FindMany(ctx, tt.ids) + if (err != nil) != tt.wantErr { + t.Errorf("sceneQueryBuilder.FindMany() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if err := postFindScenes(ctx, tt.want, got); err != nil { + t.Errorf("loadSceneRelationships() error = %v", err) + return + } + + assert.Equal(tt.want, got) + }) + } +} + +func Test_sceneQueryBuilder_FindByChecksum(t *testing.T) { + getChecksum := func(index int) string { + return getSceneStringValue(index, checksumField) + } + + tests := []struct { + name string + checksum string + want []*models.Scene + wantErr bool + }{ + { + "valid", + getChecksum(sceneIdxWithSpacedName), + []*models.Scene{makeSceneWithID(sceneIdxWithSpacedName)}, + false, + }, + { + "invalid", + "invalid checksum", + nil, + false, + }, + { + "with galleries", + getChecksum(sceneIdxWithGallery), + []*models.Scene{makeSceneWithID(sceneIdxWithGallery)}, + false, + }, + { + "with performers", + getChecksum(sceneIdxWithTwoPerformers), + []*models.Scene{makeSceneWithID(sceneIdxWithTwoPerformers)}, + false, + }, + { + "with tags", + getChecksum(sceneIdxWithTwoTags), + []*models.Scene{makeSceneWithID(sceneIdxWithTwoTags)}, + false, + }, + { + "with movies", + getChecksum(sceneIdxWithMovie), + []*models.Scene{makeSceneWithID(sceneIdxWithMovie)}, + false, + }, + } + + qb := db.Scene + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + withTxn(func(ctx context.Context) error { + assert := assert.New(t) + got, err := qb.FindByChecksum(ctx, tt.checksum) + if (err != nil) != tt.wantErr { + t.Errorf("sceneQueryBuilder.FindByChecksum() error = %v, wantErr %v", err, tt.wantErr) + return nil + } + + if err := postFindScenes(ctx, tt.want, got); err != nil { + t.Errorf("loadSceneRelationships() error = %v", err) + return nil + } + + assert.Equal(tt.want, got) + + return nil + }) + }) + } +} + +func Test_sceneQueryBuilder_FindByOSHash(t *testing.T) { + getOSHash := func(index int) string { + return getSceneStringValue(index, "oshash") + } + + tests := []struct { + name string + oshash string + want []*models.Scene + wantErr bool + }{ + { + "valid", + getOSHash(sceneIdxWithSpacedName), + []*models.Scene{makeSceneWithID(sceneIdxWithSpacedName)}, + false, + }, + { + "invalid", + "invalid oshash", + nil, + false, + }, + { + "with galleries", + getOSHash(sceneIdxWithGallery), + []*models.Scene{makeSceneWithID(sceneIdxWithGallery)}, + false, + }, + { + "with performers", + getOSHash(sceneIdxWithTwoPerformers), + []*models.Scene{makeSceneWithID(sceneIdxWithTwoPerformers)}, + false, + }, + { + "with tags", + 
getOSHash(sceneIdxWithTwoTags), + []*models.Scene{makeSceneWithID(sceneIdxWithTwoTags)}, + false, + }, + { + "with movies", + getOSHash(sceneIdxWithMovie), + []*models.Scene{makeSceneWithID(sceneIdxWithMovie)}, + false, + }, + } + + qb := db.Scene + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + withTxn(func(ctx context.Context) error { + got, err := qb.FindByOSHash(ctx, tt.oshash) + if (err != nil) != tt.wantErr { + t.Errorf("sceneQueryBuilder.FindByOSHash() error = %v, wantErr %v", err, tt.wantErr) + return nil + } + + if err := postFindScenes(ctx, tt.want, got); err != nil { + t.Errorf("loadSceneRelationships() error = %v", err) + return nil + } + + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("sceneQueryBuilder.FindByOSHash() = %v, want %v", got, tt.want) + } + return nil + }) + }) + } +} + +func Test_sceneQueryBuilder_FindByPath(t *testing.T) { + getPath := func(index int) string { + return getFilePath(folderIdxWithSceneFiles, getSceneBasename(index)) + } + + tests := []struct { + name string + path string + want []*models.Scene + wantErr bool + }{ + { + "valid", + getPath(sceneIdxWithSpacedName), + []*models.Scene{makeSceneWithID(sceneIdxWithSpacedName)}, + false, + }, + { + "invalid", + "invalid path", + nil, + false, + }, + { + "with galleries", + getPath(sceneIdxWithGallery), + []*models.Scene{makeSceneWithID(sceneIdxWithGallery)}, + false, + }, + { + "with performers", + getPath(sceneIdxWithTwoPerformers), + []*models.Scene{makeSceneWithID(sceneIdxWithTwoPerformers)}, + false, + }, + { + "with tags", + getPath(sceneIdxWithTwoTags), + []*models.Scene{makeSceneWithID(sceneIdxWithTwoTags)}, + false, + }, + { + "with movies", + getPath(sceneIdxWithMovie), + []*models.Scene{makeSceneWithID(sceneIdxWithMovie)}, + false, + }, + } + + qb := db.Scene + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + withTxn(func(ctx context.Context) error { + assert := assert.New(t) + got, err := qb.FindByPath(ctx, tt.path) + if (err != nil) != tt.wantErr { + t.Errorf("sceneQueryBuilder.FindByPath() error = %v, wantErr %v", err, tt.wantErr) + return nil + } + + if err := postFindScenes(ctx, tt.want, got); err != nil { + t.Errorf("loadSceneRelationships() error = %v", err) + return nil + } + + assert.Equal(tt.want, got) + + return nil + }) + }) + } +} + +func Test_sceneQueryBuilder_FindByGalleryID(t *testing.T) { + tests := []struct { + name string + galleryID int + want []*models.Scene + wantErr bool + }{ + { + "valid", + galleryIDs[galleryIdxWithScene], + []*models.Scene{makeSceneWithID(sceneIdxWithGallery)}, + false, + }, + { + "none", + galleryIDs[galleryIdx1WithPerformer], + nil, + false, + }, + } + + qb := db.Scene + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + got, err := qb.FindByGalleryID(ctx, tt.galleryID) + if (err != nil) != tt.wantErr { + t.Errorf("sceneQueryBuilder.FindByGalleryID() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if err := postFindScenes(ctx, tt.want, got); err != nil { + t.Errorf("loadSceneRelationships() error = %v", err) + return + } + + assert.Equal(tt.want, got) + return + }) + } } func TestSceneCountByPerformerID(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Scene() - count, err := sqb.CountByPerformerID(performerIDs[performerIdxWithScene]) + withTxn(func(ctx context.Context) error { + sqb := db.Scene + count, err := 
sqb.CountByPerformerID(ctx, performerIDs[performerIdxWithScene]) if err != nil { t.Errorf("Error counting scenes: %s", err.Error()) @@ -84,7 +1747,7 @@ func TestSceneCountByPerformerID(t *testing.T) { assert.Equal(t, 1, count) - count, err = sqb.CountByPerformerID(0) + count, err = sqb.CountByPerformerID(ctx, 0) if err != nil { t.Errorf("Error counting scenes: %s", err.Error()) @@ -96,28 +1759,177 @@ }) } +func scenesToIDs(i []*models.Scene) []int { + var ret []int + for _, ii := range i { + ret = append(ret, ii.ID) + } + + return ret +} + +func Test_sceneStore_FindByFileID(t *testing.T) { + tests := []struct { + name string + fileID file.ID + include []int + exclude []int + }{ + { + "valid", + sceneFileIDs[sceneIdx1WithPerformer], + []int{sceneIdx1WithPerformer}, + nil, + }, + { + "invalid", + invalidFileID, + nil, + []int{sceneIdx1WithPerformer}, + }, + } + + qb := db.Scene + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + got, err := qb.FindByFileID(ctx, tt.fileID) + if err != nil { + t.Errorf("SceneStore.FindByFileID() error = %v", err) + return + } + for _, f := range got { + clearSceneFileIDs(f) + } + + ids := scenesToIDs(got) + include := indexesToIDs(sceneIDs, tt.include) + exclude := indexesToIDs(sceneIDs, tt.exclude) + + for _, i := range include { + assert.Contains(ids, i) + } + for _, e := range exclude { + assert.NotContains(ids, e) + } + }) + } +} + +func Test_sceneStore_CountByFileID(t *testing.T) { + tests := []struct { + name string + fileID file.ID + want int + }{ + { + "valid", + sceneFileIDs[sceneIdxWithTwoPerformers], + 1, + }, + { + "invalid", + invalidFileID, + 0, + }, + } + + qb := db.Scene + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + got, err := qb.CountByFileID(ctx, tt.fileID) + if err != nil { + t.Errorf("SceneStore.CountByFileID() error = %v", err) + return + } + + assert.Equal(tt.want, got) + }) + } +} + +func Test_sceneStore_CountMissingChecksum(t *testing.T) { + tests := []struct { + name string + want int + }{ + { + "valid", + 0, + }, + } + + qb := db.Scene + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + got, err := qb.CountMissingChecksum(ctx) + if err != nil { + t.Errorf("SceneStore.CountMissingChecksum() error = %v", err) + return + } + + assert.Equal(tt.want, got) + }) + } +} + +func Test_sceneStore_CountMissingOshash(t *testing.T) { + tests := []struct { + name string + want int + }{ + { + "valid", + 0, + }, + } + + qb := db.Scene + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + got, err := qb.CountMissingOSHash(ctx) + if err != nil { + t.Errorf("SceneStore.CountMissingOSHash() error = %v", err) + return + } + + assert.Equal(tt.want, got) + }) + } +} + func TestSceneWall(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Scene() + withTxn(func(ctx context.Context) error { + sqb := db.Scene const sceneIdx = 2 wallQuery := getSceneStringValue(sceneIdx, "Details") - scenes, err := sqb.Wall(&wallQuery) + scenes, err := sqb.Wall(ctx, &wallQuery) if err != nil { t.Errorf("Error finding scenes: %s", err.Error()) + return nil } assert.Len(t, scenes, 1) scene := scenes[0] assert.Equal(t, sceneIDs[sceneIdx], scene.ID) - assert.Equal(t,
getSceneStringValue(sceneIdx, "Path"), scene.Path) + scenePath := getFilePath(folderIdxWithSceneFiles, getSceneBasename(sceneIdx)) + assert.Equal(t, scenePath, scene.Path) wallQuery = "not exist" - scenes, err = sqb.Wall(&wallQuery) + scenes, err = sqb.Wall(ctx, &wallQuery) if err != nil { t.Errorf("Error finding scene: %s", err.Error()) + return nil } assert.Len(t, scenes, 0) @@ -131,28 +1943,32 @@ func TestSceneQueryQ(t *testing.T) { q := getSceneStringValue(sceneIdx, titleField) - withTxn(func(r models.Repository) error { - sqb := r.Scene() + withTxn(func(ctx context.Context) error { + sqb := db.Scene - sceneQueryQ(t, sqb, q, sceneIdx) + sceneQueryQ(ctx, t, sqb, q, sceneIdx) return nil }) } -func queryScene(t *testing.T, sqb models.SceneReader, sceneFilter *models.SceneFilterType, findFilter *models.FindFilterType) []*models.Scene { +func queryScene(ctx context.Context, t *testing.T, sqb models.SceneReader, sceneFilter *models.SceneFilterType, findFilter *models.FindFilterType) []*models.Scene { t.Helper() - result, err := sqb.Query(models.SceneQueryOptions{ + result, err := sqb.Query(ctx, models.SceneQueryOptions{ QueryOptions: models.QueryOptions{ FindFilter: findFilter, + Count: true, }, - SceneFilter: sceneFilter, + SceneFilter: sceneFilter, + TotalDuration: true, + TotalSize: true, }) if err != nil { t.Errorf("Error querying scene: %v", err) + return nil } - scenes, err := result.Resolve() + scenes, err := result.Resolve(ctx) if err != nil { t.Errorf("Error resolving scenes: %v", err) } @@ -160,51 +1976,205 @@ func queryScene(t *testing.T, sqb models.SceneReader, sceneFilter *models.SceneF return scenes } -func sceneQueryQ(t *testing.T, sqb models.SceneReader, q string, expectedSceneIdx int) { +func sceneQueryQ(ctx context.Context, t *testing.T, sqb models.SceneReader, q string, expectedSceneIdx int) { filter := models.FindFilterType{ Q: &q, } - scenes := queryScene(t, sqb, nil, &filter) + scenes := queryScene(ctx, t, sqb, nil, &filter) - assert.Len(t, scenes, 1) + if !assert.Len(t, scenes, 1) { + return + } scene := scenes[0] assert.Equal(t, sceneIDs[expectedSceneIdx], scene.ID) // no Q should return all results filter.Q = nil - scenes = queryScene(t, sqb, nil, &filter) + scenes = queryScene(ctx, t, sqb, nil, &filter) assert.Len(t, scenes, totalScenes) } func TestSceneQueryPath(t *testing.T) { - const sceneIdx = 1 - scenePath := getSceneStringValue(sceneIdx, "Path") + const ( + sceneIdx = 1 + otherSceneIdx = 2 + ) + folder := folderPaths[folderIdxWithSceneFiles] + basename := getSceneBasename(sceneIdx) + scenePath := getFilePath(folderIdxWithSceneFiles, getSceneBasename(sceneIdx)) - pathCriterion := models.StringCriterionInput{ - Value: scenePath, - Modifier: models.CriterionModifierEquals, + tests := []struct { + name string + input models.StringCriterionInput + mustInclude []int + mustExclude []int + }{ + { + "equals full path", + models.StringCriterionInput{ + Value: scenePath, + Modifier: models.CriterionModifierEquals, + }, + []int{sceneIdx}, + []int{otherSceneIdx}, + }, + { + "equals folder name", + models.StringCriterionInput{ + Value: folder, + Modifier: models.CriterionModifierEquals, + }, + []int{sceneIdx}, + nil, + }, + { + "equals folder name trailing slash", + models.StringCriterionInput{ + Value: folder + string(filepath.Separator), + Modifier: models.CriterionModifierEquals, + }, + []int{sceneIdx}, + nil, + }, + { + "equals base name", + models.StringCriterionInput{ + Value: basename, + Modifier: models.CriterionModifierEquals, + }, + []int{sceneIdx}, + nil, + }, + 
{ + "equals base name leading slash", + models.StringCriterionInput{ + Value: string(filepath.Separator) + basename, + Modifier: models.CriterionModifierEquals, + }, + []int{sceneIdx}, + nil, + }, + { + "equals full path wildcard", + models.StringCriterionInput{ + Value: filepath.Join(folder, "scene_0001_%"), + Modifier: models.CriterionModifierEquals, + }, + []int{sceneIdx}, + []int{otherSceneIdx}, + }, + { + "not equals full path", + models.StringCriterionInput{ + Value: scenePath, + Modifier: models.CriterionModifierNotEquals, + }, + []int{otherSceneIdx}, + []int{sceneIdx}, + }, + { + "not equals folder name", + models.StringCriterionInput{ + Value: folder, + Modifier: models.CriterionModifierNotEquals, + }, + nil, + []int{sceneIdx}, + }, + { + "not equals basename", + models.StringCriterionInput{ + Value: basename, + Modifier: models.CriterionModifierNotEquals, + }, + nil, + []int{sceneIdx}, + }, + { + "includes folder name", + models.StringCriterionInput{ + Value: folder, + Modifier: models.CriterionModifierIncludes, + }, + []int{sceneIdx}, + nil, + }, + { + "includes base name", + models.StringCriterionInput{ + Value: basename, + Modifier: models.CriterionModifierIncludes, + }, + []int{sceneIdx}, + nil, + }, + { + "includes full path", + models.StringCriterionInput{ + Value: scenePath, + Modifier: models.CriterionModifierIncludes, + }, + []int{sceneIdx}, + []int{otherSceneIdx}, + }, + { + "matches regex", + models.StringCriterionInput{ + Value: "scene_.*1_Path", + Modifier: models.CriterionModifierMatchesRegex, + }, + []int{sceneIdx}, + nil, + }, + { + "not matches regex", + models.StringCriterionInput{ + Value: "scene_.*1_Path", + Modifier: models.CriterionModifierNotMatchesRegex, + }, + nil, + []int{sceneIdx}, + }, } - verifyScenesPath(t, pathCriterion) + qb := db.Scene - pathCriterion.Modifier = models.CriterionModifierNotEquals - verifyScenesPath(t, pathCriterion) + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + got, err := qb.Query(ctx, models.SceneQueryOptions{ + SceneFilter: &models.SceneFilterType{ + Path: &tt.input, + }, + }) - pathCriterion.Modifier = models.CriterionModifierMatchesRegex - pathCriterion.Value = "scene_.*1_Path" - verifyScenesPath(t, pathCriterion) + if err != nil { + t.Errorf("sceneQueryBuilder.TestSceneQueryPath() error = %v", err) + return + } - pathCriterion.Modifier = models.CriterionModifierNotMatchesRegex - verifyScenesPath(t, pathCriterion) + mustInclude := indexesToIDs(sceneIDs, tt.mustInclude) + mustExclude := indexesToIDs(sceneIDs, tt.mustExclude) + + missing := intslice.IntExclude(mustInclude, got.IDs) + if len(missing) > 0 { + t.Errorf("SceneStore.TestSceneQueryPath() missing expected IDs: %v", missing) + } + + notExcluded := intslice.IntIntercect(mustExclude, got.IDs) + if len(notExcluded) > 0 { + t.Errorf("SceneStore.TestSceneQueryPath() expected IDs to be excluded: %v", notExcluded) + } + }) + } } func TestSceneQueryURL(t *testing.T) { const sceneIdx = 1 - scenePath := getSceneStringValue(sceneIdx, urlField) + sceneURL := getSceneStringValue(sceneIdx, urlField) urlCriterion := models.StringCriterionInput{ - Value: scenePath, + Value: sceneURL, Modifier: models.CriterionModifierEquals, } @@ -214,7 +2184,7 @@ func TestSceneQueryURL(t *testing.T) { verifyFn := func(s *models.Scene) { t.Helper() - verifyNullString(t, s.URL, urlCriterion) + verifyString(t, s.URL, urlCriterion) } verifySceneQuery(t, filter, verifyFn) @@ -241,8 +2211,8 @@ func TestSceneQueryPathOr(t *testing.T) { const 
scene1Idx = 1 const scene2Idx = 2 - scene1Path := getSceneStringValue(scene1Idx, "Path") - scene2Path := getSceneStringValue(scene2Idx, "Path") + scene1Path := getFilePath(folderIdxWithSceneFiles, getSceneBasename(scene1Idx)) + scene2Path := getFilePath(folderIdxWithSceneFiles, getSceneBasename(scene2Idx)) sceneFilter := models.SceneFilterType{ Path: &models.StringCriterionInput{ @@ -257,12 +2227,14 @@ func TestSceneQueryPathOr(t *testing.T) { }, } - withTxn(func(r models.Repository) error { - sqb := r.Scene() + withTxn(func(ctx context.Context) error { + sqb := db.Scene - scenes := queryScene(t, sqb, &sceneFilter, nil) + scenes := queryScene(ctx, t, sqb, &sceneFilter, nil) - assert.Len(t, scenes, 2) + if !assert.Len(t, scenes, 2) { + return nil + } assert.Equal(t, scene1Path, scenes[0].Path) assert.Equal(t, scene2Path, scenes[1].Path) @@ -272,8 +2244,8 @@ func TestSceneQueryPathOr(t *testing.T) { func TestSceneQueryPathAndRating(t *testing.T) { const sceneIdx = 1 - scenePath := getSceneStringValue(sceneIdx, "Path") - sceneRating := getRating(sceneIdx) + scenePath := getFilePath(folderIdxWithSceneFiles, getSceneBasename(sceneIdx)) + sceneRating := int(getRating(sceneIdx).Int64) sceneFilter := models.SceneFilterType{ Path: &models.StringCriterionInput{ @@ -282,20 +2254,22 @@ func TestSceneQueryPathAndRating(t *testing.T) { }, And: &models.SceneFilterType{ Rating: &models.IntCriterionInput{ - Value: int(sceneRating.Int64), + Value: sceneRating, Modifier: models.CriterionModifierEquals, }, }, } - withTxn(func(r models.Repository) error { - sqb := r.Scene() + withTxn(func(ctx context.Context) error { + sqb := db.Scene - scenes := queryScene(t, sqb, &sceneFilter, nil) + scenes := queryScene(ctx, t, sqb, &sceneFilter, nil) - assert.Len(t, scenes, 1) + if !assert.Len(t, scenes, 1) { + return nil + } assert.Equal(t, scenePath, scenes[0].Path) - assert.Equal(t, sceneRating.Int64, scenes[0].Rating.Int64) + assert.Equal(t, sceneRating, *scenes[0].Rating) return nil }) @@ -323,15 +2297,15 @@ func TestSceneQueryPathNotRating(t *testing.T) { }, } - withTxn(func(r models.Repository) error { - sqb := r.Scene() + withTxn(func(ctx context.Context) error { + sqb := db.Scene - scenes := queryScene(t, sqb, &sceneFilter, nil) + scenes := queryScene(ctx, t, sqb, &sceneFilter, nil) for _, scene := range scenes { verifyString(t, scene.Path, pathCriterion) ratingCriterion.Modifier = models.CriterionModifierNotEquals - verifyInt64(t, scene.Rating, ratingCriterion) + verifyIntPtr(t, scene.Rating, ratingCriterion) } return nil @@ -354,24 +2328,24 @@ func TestSceneIllegalQuery(t *testing.T) { Or: &subFilter, } - withTxn(func(r models.Repository) error { - sqb := r.Scene() + withTxn(func(ctx context.Context) error { + sqb := db.Scene queryOptions := models.SceneQueryOptions{ SceneFilter: sceneFilter, } - _, err := sqb.Query(queryOptions) + _, err := sqb.Query(ctx, queryOptions) assert.NotNil(err) sceneFilter.Or = nil sceneFilter.Not = &subFilter - _, err = sqb.Query(queryOptions) + _, err = sqb.Query(ctx, queryOptions) assert.NotNil(err) sceneFilter.And = nil sceneFilter.Or = &subFilter - _, err = sqb.Query(queryOptions) + _, err = sqb.Query(ctx, queryOptions) assert.NotNil(err) return nil @@ -379,11 +2353,12 @@ func TestSceneIllegalQuery(t *testing.T) { } func verifySceneQuery(t *testing.T, filter models.SceneFilterType, verifyFn func(s *models.Scene)) { - withTxn(func(r models.Repository) error { + t.Helper() + withTxn(func(ctx context.Context) error { t.Helper() - sqb := r.Scene() + sqb := db.Scene - scenes := 
queryScene(t, sqb, &filter, nil)
+	scenes := queryScene(ctx, t, sqb, &filter, nil)

	// assume it should find at least one
	assert.Greater(t, len(scenes), 0)
@@ -397,13 +2372,13 @@ func verifySceneQuery(t *testing.T, filter models.SceneFilterType, verifyFn func
}

func verifyScenesPath(t *testing.T, pathCriterion models.StringCriterionInput) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Scene()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Scene
	sceneFilter := models.SceneFilterType{
		Path: &pathCriterion,
	}

-	scenes := queryScene(t, sqb, &sceneFilter, nil)
+	scenes := queryScene(ctx, t, sqb, &sceneFilter, nil)

	for _, scene := range scenes {
		verifyString(t, scene.Path, pathCriterion)
@@ -446,20 +2421,55 @@ func verifyNullString(t *testing.T, value sql.NullString, criterion models.Strin
	}
}

+func verifyStringPtr(t *testing.T, value *string, criterion models.StringCriterionInput) {
+	t.Helper()
+	assert := assert.New(t)
+	if criterion.Modifier == models.CriterionModifierIsNull {
+		if value != nil && *value == "" {
+			// correct
+			return
+		}
+		assert.Nil(value, "expect is null values to be null")
+	}
+	if criterion.Modifier == models.CriterionModifierNotNull {
+		assert.NotNil(value, "expect not null values to not be null")
+		assert.Greater(len(*value), 0)
+	}
+	if criterion.Modifier == models.CriterionModifierEquals {
+		assert.Equal(criterion.Value, *value)
+	}
+	if criterion.Modifier == models.CriterionModifierNotEquals {
+		assert.NotEqual(criterion.Value, *value)
+	}
+	if criterion.Modifier == models.CriterionModifierMatchesRegex {
+		assert.NotNil(value)
+		assert.Regexp(regexp.MustCompile(criterion.Value), *value)
+	}
+	if criterion.Modifier == models.CriterionModifierNotMatchesRegex {
+		if value == nil {
+			// correct
+			return
+		}
+		assert.NotRegexp(regexp.MustCompile(criterion.Value), *value)
+	}
+}
+
func verifyString(t *testing.T, value string, criterion models.StringCriterionInput) {
	t.Helper()
	assert := assert.New(t)
-	if criterion.Modifier == models.CriterionModifierEquals {
+	switch criterion.Modifier {
+	case models.CriterionModifierEquals:
		assert.Equal(criterion.Value, value)
-	}
-	if criterion.Modifier == models.CriterionModifierNotEquals {
+	case models.CriterionModifierNotEquals:
		assert.NotEqual(criterion.Value, value)
-	}
-	if criterion.Modifier == models.CriterionModifierMatchesRegex {
+	case models.CriterionModifierMatchesRegex:
		assert.Regexp(regexp.MustCompile(criterion.Value), value)
-	}
-	if criterion.Modifier == models.CriterionModifierNotMatchesRegex {
+	case models.CriterionModifierNotMatchesRegex:
		assert.NotRegexp(regexp.MustCompile(criterion.Value), value)
+	case models.CriterionModifierIsNull:
+		assert.Equal("", value)
+	case models.CriterionModifierNotNull:
+		assert.NotEqual("", value)
	}
}
@@ -489,16 +2499,16 @@ func TestSceneQueryRating(t *testing.T) {
}

func verifyScenesRating(t *testing.T, ratingCriterion models.IntCriterionInput) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Scene()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Scene
	sceneFilter := models.SceneFilterType{
		Rating: &ratingCriterion,
	}

-	scenes := queryScene(t, sqb, &sceneFilter, nil)
+	scenes := queryScene(ctx, t, sqb, &sceneFilter, nil)

	for _, scene := range scenes {
-		verifyInt64(t, scene.Rating, ratingCriterion)
+		verifyIntPtr(t, scene.Rating, ratingCriterion)
	}

	return nil
@@ -528,6 +2538,29 @@ func verifyInt64(t *testing.T, value sql.NullInt64, criterion models.IntCriterio
	}
}

+func verifyIntPtr(t *testing.T, value *int, criterion models.IntCriterionInput) {
+	t.Helper()
+	assert := assert.New(t)
+	if criterion.Modifier == models.CriterionModifierIsNull {
+		assert.Nil(value, "expect is null values to be null")
+	}
+	if criterion.Modifier == models.CriterionModifierNotNull {
+		assert.NotNil(value, "expect not null values to not be null")
+	}
+	if criterion.Modifier == models.CriterionModifierEquals {
+		assert.Equal(criterion.Value, *value)
+	}
+	if criterion.Modifier == models.CriterionModifierNotEquals {
+		assert.NotEqual(criterion.Value, *value)
+	}
+	if criterion.Modifier == models.CriterionModifierGreaterThan {
+		assert.True(*value > criterion.Value)
+	}
+	if criterion.Modifier == models.CriterionModifierLessThan {
+		assert.True(*value < criterion.Value)
+	}
+}
+
func TestSceneQueryOCounter(t *testing.T) {
	const oCounter = 1
	oCounterCriterion := models.IntCriterionInput{
@@ -548,13 +2581,13 @@ func TestSceneQueryOCounter(t *testing.T) {
}

func verifyScenesOCounter(t *testing.T, oCounterCriterion models.IntCriterionInput) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Scene()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Scene
	sceneFilter := models.SceneFilterType{
		OCounter: &oCounterCriterion,
	}

-	scenes := queryScene(t, sqb, &sceneFilter, nil)
+	scenes := queryScene(ctx, t, sqb, &sceneFilter, nil)

	for _, scene := range scenes {
		verifyInt(t, scene.OCounter, oCounterCriterion)
@@ -607,21 +2640,27 @@ func TestSceneQueryDuration(t *testing.T) {
}

func verifyScenesDuration(t *testing.T, durationCriterion models.IntCriterionInput) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Scene()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Scene
	sceneFilter := models.SceneFilterType{
		Duration: &durationCriterion,
	}

-	scenes := queryScene(t, sqb, &sceneFilter, nil)
+	scenes := queryScene(ctx, t, sqb, &sceneFilter, nil)

	for _, scene := range scenes {
+		if err := scene.LoadPrimaryFile(ctx, db.File); err != nil {
+			t.Errorf("Error querying scene files: %v", err)
+			return nil
+		}
+
+		duration := scene.Files.Primary().Duration
		if durationCriterion.Modifier == models.CriterionModifierEquals {
-			assert.True(t, scene.Duration.Float64 >= float64(durationCriterion.Value) && scene.Duration.Float64 < float64(durationCriterion.Value+1))
+			assert.True(t, duration >= float64(durationCriterion.Value) && duration < float64(durationCriterion.Value+1))
		} else if durationCriterion.Modifier == models.CriterionModifierNotEquals {
-			assert.True(t, scene.Duration.Float64 < float64(durationCriterion.Value) || scene.Duration.Float64 >= float64(durationCriterion.Value+1))
+			assert.True(t, duration < float64(durationCriterion.Value) || duration >= float64(durationCriterion.Value+1))
		} else {
-			verifyFloat64(t, scene.Duration, durationCriterion)
+			verifyFloat64(t, duration, durationCriterion)
		}
	}
@@ -629,25 +2668,37 @@ func verifyScenesDuration(t *testing.T, durationCriterion models.IntCriterionInp
	})
}

-func verifyFloat64(t *testing.T, value sql.NullFloat64, criterion models.IntCriterionInput) {
+func verifyFloat64(t *testing.T, value float64, criterion models.IntCriterionInput) {
	assert := assert.New(t)
-	if criterion.Modifier == models.CriterionModifierIsNull {
-		assert.False(value.Valid, "expect is null values to be null")
-	}
-	if criterion.Modifier == models.CriterionModifierNotNull {
-		assert.True(value.Valid, "expect is null values to be null")
-	}
	if criterion.Modifier == models.CriterionModifierEquals {
-		assert.Equal(float64(criterion.Value), value.Float64)
+		assert.Equal(float64(criterion.Value), value)
	}
	if criterion.Modifier == models.CriterionModifierNotEquals {
-		assert.NotEqual(float64(criterion.Value), value.Float64)
+		assert.NotEqual(float64(criterion.Value), value)
	}
	if criterion.Modifier == models.CriterionModifierGreaterThan {
-		assert.True(value.Float64 > float64(criterion.Value))
+		assert.True(value > float64(criterion.Value))
	}
	if criterion.Modifier == models.CriterionModifierLessThan {
-		assert.True(value.Float64 < float64(criterion.Value))
+		assert.True(value < float64(criterion.Value))
+	}
+}
+
+func verifyFloat64Ptr(t *testing.T, value *float64, criterion models.IntCriterionInput) {
+	assert := assert.New(t)
+	switch criterion.Modifier {
+	case models.CriterionModifierIsNull:
+		assert.Nil(value, "expect is null values to be null")
+	case models.CriterionModifierNotNull:
+		assert.NotNil(value, "expect is not null values to not be null")
+	case models.CriterionModifierEquals:
+		assert.True(value != nil && *value == float64(criterion.Value))
+	case models.CriterionModifierNotEquals:
+		assert.True(value == nil || *value != float64(criterion.Value))
+	case models.CriterionModifierGreaterThan:
+		assert.True(value != nil && *value > float64(criterion.Value))
+	case models.CriterionModifierLessThan:
+		assert.True(value != nil && *value < float64(criterion.Value))
	}
}
@@ -661,8 +2712,8 @@ func TestSceneQueryResolution(t *testing.T) {
}

func verifyScenesResolution(t *testing.T, resolution models.ResolutionEnum) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Scene()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Scene
	sceneFilter := models.SceneFilterType{
		Resolution: &models.ResolutionCriterionInput{
			Value: resolution,
@@ -670,19 +2721,37 @@ func verifyScenesResolution(t *testing.T, resolution models.ResolutionEnum) {
		},
	}

-	scenes := queryScene(t, sqb, &sceneFilter, nil)
+	scenes := queryScene(ctx, t, sqb, &sceneFilter, nil)

	for _, scene := range scenes {
-		verifySceneResolution(t, scene.Height, resolution)
+		if err := scene.LoadPrimaryFile(ctx, db.File); err != nil {
+			t.Errorf("Error querying scene files: %v", err)
+			return nil
+		}
+		f := scene.Files.Primary()
+		height := 0
+		if f != nil {
+			height = f.Height
+		}
+		verifySceneResolution(t, &height, resolution)
	}

	return nil
	})
}

-func verifySceneResolution(t *testing.T, height sql.NullInt64, resolution models.ResolutionEnum) {
+func verifySceneResolution(t *testing.T, height *int, resolution models.ResolutionEnum) {
+	if !resolution.IsValid() {
+		return
+	}
+
	assert := assert.New(t)
-	h := height.Int64
+	assert.NotNil(height)
+	if t.Failed() {
+		return
+	}
+
+	h := *height

	switch resolution {
	case models.ResolutionEnumLow:
@@ -706,20 +2775,20 @@ func TestAllResolutionsHaveResolutionRange(t *testing.T) {
}

func TestSceneQueryResolutionModifiers(t *testing.T) {
-	if err := withRollbackTxn(func(r models.Repository) error {
-		qb := r.Scene()
-		sceneNoResolution, _ := createScene(qb, 0, 0)
-		firstScene540P, _ := createScene(qb, 960, 540)
-		secondScene540P, _ := createScene(qb, 1280, 719)
-		firstScene720P, _ := createScene(qb, 1280, 720)
-		secondScene720P, _ := createScene(qb, 1280, 721)
-		thirdScene720P, _ := createScene(qb, 1920, 1079)
-		scene1080P, _ := createScene(qb, 1920, 1080)
+	if err := withRollbackTxn(func(ctx context.Context) error {
+		qb := db.Scene
+		sceneNoResolution, _ := createScene(ctx, 0, 0)
+		firstScene540P, _ := createScene(ctx, 960, 540)
+		secondScene540P, _ := createScene(ctx, 1280, 719)
+		firstScene720P, _ := createScene(ctx, 1280, 720)
+		secondScene720P, _ := createScene(ctx, 1280, 721)
+		thirdScene720P, _ := createScene(ctx, 1920, 1079)
+		
scene1080P, _ := createScene(ctx, 1920, 1080) - scenesEqualTo720P := queryScenes(t, qb, models.ResolutionEnumStandardHd, models.CriterionModifierEquals) - scenesNotEqualTo720P := queryScenes(t, qb, models.ResolutionEnumStandardHd, models.CriterionModifierNotEquals) - scenesGreaterThan720P := queryScenes(t, qb, models.ResolutionEnumStandardHd, models.CriterionModifierGreaterThan) - scenesLessThan720P := queryScenes(t, qb, models.ResolutionEnumStandardHd, models.CriterionModifierLessThan) + scenesEqualTo720P := queryScenes(ctx, t, qb, models.ResolutionEnumStandardHd, models.CriterionModifierEquals) + scenesNotEqualTo720P := queryScenes(ctx, t, qb, models.ResolutionEnumStandardHd, models.CriterionModifierNotEquals) + scenesGreaterThan720P := queryScenes(ctx, t, qb, models.ResolutionEnumStandardHd, models.CriterionModifierGreaterThan) + scenesLessThan720P := queryScenes(ctx, t, qb, models.ResolutionEnumStandardHd, models.CriterionModifierLessThan) assert.Subset(t, scenesEqualTo720P, []*models.Scene{firstScene720P, secondScene720P, thirdScene720P}) assert.NotSubset(t, scenesEqualTo720P, []*models.Scene{sceneNoResolution, firstScene540P, secondScene540P, scene1080P}) @@ -739,7 +2808,7 @@ func TestSceneQueryResolutionModifiers(t *testing.T) { } } -func queryScenes(t *testing.T, queryBuilder models.SceneReaderWriter, resolution models.ResolutionEnum, modifier models.CriterionModifier) []*models.Scene { +func queryScenes(ctx context.Context, t *testing.T, queryBuilder models.SceneReaderWriter, resolution models.ResolutionEnum, modifier models.CriterionModifier) []*models.Scene { sceneFilter := models.SceneFilterType{ Resolution: &models.ResolutionCriterionInput{ Value: resolution, @@ -747,30 +2816,37 @@ func queryScenes(t *testing.T, queryBuilder models.SceneReaderWriter, resolution }, } - return queryScene(t, queryBuilder, &sceneFilter, nil) + return queryScene(ctx, t, queryBuilder, &sceneFilter, nil) } -func createScene(queryBuilder models.SceneReaderWriter, width int64, height int64) (*models.Scene, error) { +func createScene(ctx context.Context, width int, height int) (*models.Scene, error) { name := fmt.Sprintf("TestSceneQueryResolutionModifiers %d %d", width, height) - scene := models.Scene{ - Path: name, - Width: sql.NullInt64{ - Int64: width, - Valid: true, + + sceneFile := &file.VideoFile{ + BaseFile: &file.BaseFile{ + Basename: name, + ParentFolderID: folderIDs[folderIdxWithSceneFiles], }, - Height: sql.NullInt64{ - Int64: height, - Valid: true, - }, - Checksum: sql.NullString{String: md5.FromString(name), Valid: true}, + Width: width, + Height: height, } - return queryBuilder.Create(scene) + if err := db.File.Create(ctx, sceneFile); err != nil { + return nil, err + } + + scene := &models.Scene{} + + if err := db.Scene.Create(ctx, scene, []file.ID{sceneFile.ID}); err != nil { + return nil, err + } + + return scene, nil } func TestSceneQueryHasMarkers(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Scene() + withTxn(func(ctx context.Context) error { + sqb := db.Scene hasMarkers := "true" sceneFilter := models.SceneFilterType{ HasMarkers: &hasMarkers, @@ -781,17 +2857,17 @@ func TestSceneQueryHasMarkers(t *testing.T) { Q: &q, } - scenes := queryScene(t, sqb, &sceneFilter, &findFilter) + scenes := queryScene(ctx, t, sqb, &sceneFilter, &findFilter) assert.Len(t, scenes, 1) assert.Equal(t, sceneIDs[sceneIdxWithMarkers], scenes[0].ID) hasMarkers = "false" - scenes = queryScene(t, sqb, &sceneFilter, &findFilter) + scenes = queryScene(ctx, t, sqb, &sceneFilter, &findFilter) 
assert.Len(t, scenes, 0)

	findFilter.Q = nil
-	scenes = queryScene(t, sqb, &sceneFilter, &findFilter)
+	scenes = queryScene(ctx, t, sqb, &sceneFilter, &findFilter)

	assert.NotEqual(t, 0, len(scenes))
@@ -805,8 +2881,8 @@ func TestSceneQueryHasMarkers(t *testing.T) {
}

func TestSceneQueryIsMissingGallery(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Scene()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Scene
	isMissing := "galleries"
	sceneFilter := models.SceneFilterType{
		IsMissing: &isMissing,
@@ -817,12 +2893,12 @@ func TestSceneQueryIsMissingGallery(t *testing.T) {
		Q: &q,
	}

-	scenes := queryScene(t, sqb, &sceneFilter, &findFilter)
+	scenes := queryScene(ctx, t, sqb, &sceneFilter, &findFilter)

	assert.Len(t, scenes, 0)

	findFilter.Q = nil
-	scenes = queryScene(t, sqb, &sceneFilter, &findFilter)
+	scenes = queryScene(ctx, t, sqb, &sceneFilter, &findFilter)

	// ensure none of the ids equal the one with gallery
	for _, scene := range scenes {
@@ -834,8 +2910,8 @@ func TestSceneQueryIsMissingGallery(t *testing.T) {
}

func TestSceneQueryIsMissingStudio(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Scene()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Scene
	isMissing := "studio"
	sceneFilter := models.SceneFilterType{
		IsMissing: &isMissing,
@@ -846,12 +2922,12 @@ func TestSceneQueryIsMissingStudio(t *testing.T) {
		Q: &q,
	}

-	scenes := queryScene(t, sqb, &sceneFilter, &findFilter)
+	scenes := queryScene(ctx, t, sqb, &sceneFilter, &findFilter)

	assert.Len(t, scenes, 0)

	findFilter.Q = nil
-	scenes = queryScene(t, sqb, &sceneFilter, &findFilter)
+	scenes = queryScene(ctx, t, sqb, &sceneFilter, &findFilter)

	// ensure none of the ids equal the one with studio
	for _, scene := range scenes {
@@ -863,8 +2939,8 @@ func TestSceneQueryIsMissingStudio(t *testing.T) {
}

func TestSceneQueryIsMissingMovies(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Scene()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Scene
	isMissing := "movie"
	sceneFilter := models.SceneFilterType{
		IsMissing: &isMissing,
@@ -875,12 +2951,12 @@ func TestSceneQueryIsMissingMovies(t *testing.T) {
		Q: &q,
	}

-	scenes := queryScene(t, sqb, &sceneFilter, &findFilter)
+	scenes := queryScene(ctx, t, sqb, &sceneFilter, &findFilter)

	assert.Len(t, scenes, 0)

	findFilter.Q = nil
-	scenes = queryScene(t, sqb, &sceneFilter, &findFilter)
+	scenes = queryScene(ctx, t, sqb, &sceneFilter, &findFilter)

	// ensure none of the ids equal the one with movies
	for _, scene := range scenes {
@@ -892,8 +2968,8 @@ func TestSceneQueryIsMissingMovies(t *testing.T) {
}

func TestSceneQueryIsMissingPerformers(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Scene()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Scene
	isMissing := "performers"
	sceneFilter := models.SceneFilterType{
		IsMissing: &isMissing,
@@ -904,12 +2980,12 @@ func TestSceneQueryIsMissingPerformers(t *testing.T) {
		Q: &q,
	}

-	scenes := queryScene(t, sqb, &sceneFilter, &findFilter)
+	scenes := queryScene(ctx, t, sqb, &sceneFilter, &findFilter)

	assert.Len(t, scenes, 0)

	findFilter.Q = nil
-	scenes = queryScene(t, sqb, &sceneFilter, &findFilter)
+	scenes = queryScene(ctx, t, sqb, &sceneFilter, &findFilter)

	assert.True(t, len(scenes) > 0)
@@ -923,21 +2999,21 @@ func TestSceneQueryIsMissingPerformers(t *testing.T) {
}

func TestSceneQueryIsMissingDate(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Scene()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Scene
	isMissing := "date"
	sceneFilter := models.SceneFilterType{
		IsMissing: &isMissing,
	}

-	scenes := queryScene(t, sqb, &sceneFilter, nil)
+	scenes := queryScene(ctx, t, sqb, &sceneFilter, nil)

	// three in four scenes have no date
	assert.Len(t, scenes, int(math.Ceil(float64(totalScenes)/4*3)))

	// ensure date is null, empty or "0001-01-01"
	for _, scene := range scenes {
-		assert.True(t, !scene.Date.Valid || scene.Date.String == "" || scene.Date.String == "0001-01-01")
+		assert.True(t, scene.Date == nil || scene.Date.Time == time.Time{})
	}

	return nil
@@ -945,8 +3021,8 @@ func TestSceneQueryIsMissingDate(t *testing.T) {
}

func TestSceneQueryIsMissingTags(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Scene()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Scene
	isMissing := "tags"
	sceneFilter := models.SceneFilterType{
		IsMissing: &isMissing,
@@ -957,12 +3033,12 @@ func TestSceneQueryIsMissingTags(t *testing.T) {
		Q: &q,
	}

-	scenes := queryScene(t, sqb, &sceneFilter, &findFilter)
+	scenes := queryScene(ctx, t, sqb, &sceneFilter, &findFilter)

	assert.Len(t, scenes, 0)

	findFilter.Q = nil
-	scenes = queryScene(t, sqb, &sceneFilter, &findFilter)
+	scenes = queryScene(ctx, t, sqb, &sceneFilter, &findFilter)

	assert.True(t, len(scenes) > 0)
@@ -971,29 +3047,49 @@ func TestSceneQueryIsMissingTags(t *testing.T) {
}

func TestSceneQueryIsMissingRating(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Scene()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Scene
	isMissing := "rating"
	sceneFilter := models.SceneFilterType{
		IsMissing: &isMissing,
	}

-	scenes := queryScene(t, sqb, &sceneFilter, nil)
+	scenes := queryScene(ctx, t, sqb, &sceneFilter, nil)

	assert.True(t, len(scenes) > 0)

	// ensure rating is null
	for _, scene := range scenes {
-		assert.True(t, !scene.Rating.Valid)
+		assert.Nil(t, scene.Rating)
	}

	return nil
	})
}

+func TestSceneQueryIsMissingPhash(t *testing.T) {
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Scene
+		isMissing := "phash"
+		sceneFilter := models.SceneFilterType{
+			IsMissing: &isMissing,
+		}
+
+		scenes := queryScene(ctx, t, sqb, &sceneFilter, nil)
+
+		if !assert.Len(t, scenes, 1) {
+			return nil
+		}
+
+		assert.Equal(t, sceneIDs[sceneIdxMissingPhash], scenes[0].ID)
+
+		return nil
+	})
+}
+
func TestSceneQueryPerformers(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		sqb := r.Scene()
+	withTxn(func(ctx context.Context) error {
+		sqb := db.Scene
	performerCriterion := models.MultiCriterionInput{
		Value: []string{
			strconv.Itoa(performerIDs[performerIdxWithScene]),
@@ -1006,7 +3102,7 @@ func TestSceneQueryPerformers(t *testing.T) {
		Performers: &performerCriterion,
	}

-	scenes := queryScene(t, sqb, &sceneFilter, nil)
+	scenes := queryScene(ctx, t, sqb, &sceneFilter, nil)

	assert.Len(t, scenes, 2)
@@ -1023,7 +3119,7 @@ func TestSceneQueryPerformers(t *testing.T) {
		Modifier: models.CriterionModifierIncludesAll,
	}

-	scenes = queryScene(t, sqb, &sceneFilter, nil)
+	scenes = queryScene(ctx, t, sqb, &sceneFilter, nil)

	assert.Len(t, scenes, 1)
	assert.Equal(t, sceneIDs[sceneIdxWithTwoPerformers], scenes[0].ID)
@@ -1040,7 +3136,7 @@ func TestSceneQueryPerformers(t *testing.T) {
		Q: &q,
	}

-	scenes = queryScene(t, sqb, &sceneFilter, &findFilter)
+	scenes = queryScene(ctx, t, sqb, &sceneFilter, &findFilter)

	assert.Len(t, scenes, 0)

	return nil
@@ -1048,8 +3144,8 @@ func TestSceneQueryPerformers(t *testing.T) {
}

func TestSceneQueryTags(t *testing.T) {
-	withTxn(func(r models.Repository) error {
-		sqb := 
r.Scene() + withTxn(func(ctx context.Context) error { + sqb := db.Scene tagCriterion := models.HierarchicalMultiCriterionInput{ Value: []string{ strconv.Itoa(tagIDs[tagIdxWithScene]), @@ -1062,7 +3158,7 @@ func TestSceneQueryTags(t *testing.T) { Tags: &tagCriterion, } - scenes := queryScene(t, sqb, &sceneFilter, nil) + scenes := queryScene(ctx, t, sqb, &sceneFilter, nil) assert.Len(t, scenes, 2) // ensure ids are correct @@ -1078,7 +3174,7 @@ func TestSceneQueryTags(t *testing.T) { Modifier: models.CriterionModifierIncludesAll, } - scenes = queryScene(t, sqb, &sceneFilter, nil) + scenes = queryScene(ctx, t, sqb, &sceneFilter, nil) assert.Len(t, scenes, 1) assert.Equal(t, sceneIDs[sceneIdxWithTwoTags], scenes[0].ID) @@ -1095,7 +3191,7 @@ func TestSceneQueryTags(t *testing.T) { Q: &q, } - scenes = queryScene(t, sqb, &sceneFilter, &findFilter) + scenes = queryScene(ctx, t, sqb, &sceneFilter, &findFilter) assert.Len(t, scenes, 0) return nil @@ -1103,8 +3199,8 @@ func TestSceneQueryTags(t *testing.T) { } func TestSceneQueryPerformerTags(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Scene() + withTxn(func(ctx context.Context) error { + sqb := db.Scene tagCriterion := models.HierarchicalMultiCriterionInput{ Value: []string{ strconv.Itoa(tagIDs[tagIdxWithPerformer]), @@ -1117,7 +3213,7 @@ func TestSceneQueryPerformerTags(t *testing.T) { PerformerTags: &tagCriterion, } - scenes := queryScene(t, sqb, &sceneFilter, nil) + scenes := queryScene(ctx, t, sqb, &sceneFilter, nil) assert.Len(t, scenes, 2) // ensure ids are correct @@ -1133,7 +3229,7 @@ func TestSceneQueryPerformerTags(t *testing.T) { Modifier: models.CriterionModifierIncludesAll, } - scenes = queryScene(t, sqb, &sceneFilter, nil) + scenes = queryScene(ctx, t, sqb, &sceneFilter, nil) assert.Len(t, scenes, 1) assert.Equal(t, sceneIDs[sceneIdxWithPerformerTwoTags], scenes[0].ID) @@ -1150,7 +3246,7 @@ func TestSceneQueryPerformerTags(t *testing.T) { Q: &q, } - scenes = queryScene(t, sqb, &sceneFilter, &findFilter) + scenes = queryScene(ctx, t, sqb, &sceneFilter, &findFilter) assert.Len(t, scenes, 0) tagCriterion = models.HierarchicalMultiCriterionInput{ @@ -1158,22 +3254,22 @@ func TestSceneQueryPerformerTags(t *testing.T) { } q = getSceneStringValue(sceneIdx1WithPerformer, titleField) - scenes = queryScene(t, sqb, &sceneFilter, &findFilter) + scenes = queryScene(ctx, t, sqb, &sceneFilter, &findFilter) assert.Len(t, scenes, 1) assert.Equal(t, sceneIDs[sceneIdx1WithPerformer], scenes[0].ID) q = getSceneStringValue(sceneIdxWithPerformerTag, titleField) - scenes = queryScene(t, sqb, &sceneFilter, &findFilter) + scenes = queryScene(ctx, t, sqb, &sceneFilter, &findFilter) assert.Len(t, scenes, 0) tagCriterion.Modifier = models.CriterionModifierNotNull - scenes = queryScene(t, sqb, &sceneFilter, &findFilter) + scenes = queryScene(ctx, t, sqb, &sceneFilter, &findFilter) assert.Len(t, scenes, 1) assert.Equal(t, sceneIDs[sceneIdxWithPerformerTag], scenes[0].ID) q = getSceneStringValue(sceneIdx1WithPerformer, titleField) - scenes = queryScene(t, sqb, &sceneFilter, &findFilter) + scenes = queryScene(ctx, t, sqb, &sceneFilter, &findFilter) assert.Len(t, scenes, 0) return nil @@ -1181,8 +3277,8 @@ func TestSceneQueryPerformerTags(t *testing.T) { } func TestSceneQueryStudio(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Scene() + withTxn(func(ctx context.Context) error { + sqb := db.Scene studioCriterion := models.HierarchicalMultiCriterionInput{ Value: []string{ strconv.Itoa(studioIDs[studioIdxWithScene]), @@ 
-1194,7 +3290,7 @@ func TestSceneQueryStudio(t *testing.T) { Studios: &studioCriterion, } - scenes := queryScene(t, sqb, &sceneFilter, nil) + scenes := queryScene(ctx, t, sqb, &sceneFilter, nil) assert.Len(t, scenes, 1) @@ -1213,7 +3309,7 @@ func TestSceneQueryStudio(t *testing.T) { Q: &q, } - scenes = queryScene(t, sqb, &sceneFilter, &findFilter) + scenes = queryScene(ctx, t, sqb, &sceneFilter, &findFilter) assert.Len(t, scenes, 0) return nil @@ -1221,8 +3317,8 @@ func TestSceneQueryStudio(t *testing.T) { } func TestSceneQueryStudioDepth(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Scene() + withTxn(func(ctx context.Context) error { + sqb := db.Scene depth := 2 studioCriterion := models.HierarchicalMultiCriterionInput{ Value: []string{ @@ -1236,16 +3332,16 @@ func TestSceneQueryStudioDepth(t *testing.T) { Studios: &studioCriterion, } - scenes := queryScene(t, sqb, &sceneFilter, nil) + scenes := queryScene(ctx, t, sqb, &sceneFilter, nil) assert.Len(t, scenes, 1) depth = 1 - scenes = queryScene(t, sqb, &sceneFilter, nil) + scenes = queryScene(ctx, t, sqb, &sceneFilter, nil) assert.Len(t, scenes, 0) studioCriterion.Value = []string{strconv.Itoa(studioIDs[studioIdxWithParentAndChild])} - scenes = queryScene(t, sqb, &sceneFilter, nil) + scenes = queryScene(ctx, t, sqb, &sceneFilter, nil) assert.Len(t, scenes, 1) // ensure id is correct @@ -1265,15 +3361,15 @@ func TestSceneQueryStudioDepth(t *testing.T) { Q: &q, } - scenes = queryScene(t, sqb, &sceneFilter, &findFilter) + scenes = queryScene(ctx, t, sqb, &sceneFilter, &findFilter) assert.Len(t, scenes, 0) depth = 1 - scenes = queryScene(t, sqb, &sceneFilter, &findFilter) + scenes = queryScene(ctx, t, sqb, &sceneFilter, &findFilter) assert.Len(t, scenes, 1) studioCriterion.Value = []string{strconv.Itoa(studioIDs[studioIdxWithParentAndChild])} - scenes = queryScene(t, sqb, &sceneFilter, &findFilter) + scenes = queryScene(ctx, t, sqb, &sceneFilter, &findFilter) assert.Len(t, scenes, 0) return nil @@ -1281,8 +3377,8 @@ func TestSceneQueryStudioDepth(t *testing.T) { } func TestSceneQueryMovies(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Scene() + withTxn(func(ctx context.Context) error { + sqb := db.Scene movieCriterion := models.MultiCriterionInput{ Value: []string{ strconv.Itoa(movieIDs[movieIdxWithScene]), @@ -1294,7 +3390,7 @@ func TestSceneQueryMovies(t *testing.T) { Movies: &movieCriterion, } - scenes := queryScene(t, sqb, &sceneFilter, nil) + scenes := queryScene(ctx, t, sqb, &sceneFilter, nil) assert.Len(t, scenes, 1) @@ -1313,55 +3409,152 @@ func TestSceneQueryMovies(t *testing.T) { Q: &q, } - scenes = queryScene(t, sqb, &sceneFilter, &findFilter) + scenes = queryScene(ctx, t, sqb, &sceneFilter, &findFilter) assert.Len(t, scenes, 0) return nil }) } -func TestSceneQuerySorting(t *testing.T) { - sort := titleField - direction := models.SortDirectionEnumAsc - findFilter := models.FindFilterType{ - Sort: &sort, - Direction: &direction, - } +func TestSceneQueryPhashDuplicated(t *testing.T) { + withTxn(func(ctx context.Context) error { + sqb := db.Scene + duplicated := true + phashCriterion := models.PHashDuplicationCriterionInput{ + Duplicated: &duplicated, + } - withTxn(func(r models.Repository) error { - sqb := r.Scene() - scenes := queryScene(t, sqb, nil, &findFilter) + sceneFilter := models.SceneFilterType{ + Duplicated: &phashCriterion, + } - // scenes should be in same order as indexes - firstScene := scenes[0] - lastScene := scenes[len(scenes)-1] + scenes := queryScene(ctx, t, sqb, 
&sceneFilter, nil) - assert.Equal(t, sceneIDs[0], firstScene.ID) - assert.Equal(t, sceneIDs[sceneIdxWithSpacedName], lastScene.ID) + assert.Len(t, scenes, dupeScenePhashes*2) - // sort in descending order - direction = models.SortDirectionEnumDesc + duplicated = false - scenes = queryScene(t, sqb, nil, &findFilter) - firstScene = scenes[0] - lastScene = scenes[len(scenes)-1] - - assert.Equal(t, sceneIDs[sceneIdxWithSpacedName], firstScene.ID) - assert.Equal(t, sceneIDs[0], lastScene.ID) + scenes = queryScene(ctx, t, sqb, &sceneFilter, nil) + // -1 for missing phash + assert.Len(t, scenes, totalScenes-(dupeScenePhashes*2)-1) return nil }) } +func TestSceneQuerySorting(t *testing.T) { + tests := []struct { + name string + sortBy string + dir models.SortDirectionEnum + firstSceneIdx int // -1 to ignore + lastSceneIdx int + }{ + { + "bitrate", + "bitrate", + models.SortDirectionEnumAsc, + -1, + -1, + }, + { + "duration", + "duration", + models.SortDirectionEnumDesc, + -1, + -1, + }, + { + "file mod time", + "file_mod_time", + models.SortDirectionEnumDesc, + -1, + -1, + }, + { + "file size", + "filesize", + models.SortDirectionEnumDesc, + -1, + -1, + }, + { + "frame rate", + "framerate", + models.SortDirectionEnumDesc, + -1, + -1, + }, + { + "path", + "path", + models.SortDirectionEnumDesc, + -1, + -1, + }, + { + "perceptual_similarity", + "perceptual_similarity", + models.SortDirectionEnumDesc, + -1, + -1, + }, + } + + qb := db.Scene + + for _, tt := range tests { + runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { + assert := assert.New(t) + got, err := qb.Query(ctx, models.SceneQueryOptions{ + QueryOptions: models.QueryOptions{ + FindFilter: &models.FindFilterType{ + Sort: &tt.sortBy, + Direction: &tt.dir, + }, + }, + }) + + if err != nil { + t.Errorf("sceneQueryBuilder.TestSceneQuerySorting() error = %v", err) + return + } + + scenes, err := got.Resolve(ctx) + if err != nil { + t.Errorf("sceneQueryBuilder.TestSceneQuerySorting() error = %v", err) + return + } + + if !assert.Greater(len(scenes), 0) { + return + } + + // scenes should be in same order as indexes + firstScene := scenes[0] + lastScene := scenes[len(scenes)-1] + + if tt.firstSceneIdx != -1 { + firstSceneID := sceneIDs[tt.firstSceneIdx] + assert.Equal(firstSceneID, firstScene.ID) + } + if tt.lastSceneIdx != -1 { + lastSceneID := sceneIDs[tt.lastSceneIdx] + assert.Equal(lastSceneID, lastScene.ID) + } + }) + } +} + func TestSceneQueryPagination(t *testing.T) { perPage := 1 findFilter := models.FindFilterType{ PerPage: &perPage, } - withTxn(func(r models.Repository) error { - sqb := r.Scene() - scenes := queryScene(t, sqb, nil, &findFilter) + withTxn(func(ctx context.Context) error { + sqb := db.Scene + scenes := queryScene(ctx, t, sqb, nil, &findFilter) assert.Len(t, scenes, 1) @@ -1369,7 +3562,7 @@ func TestSceneQueryPagination(t *testing.T) { page := 2 findFilter.Page = &page - scenes = queryScene(t, sqb, nil, &findFilter) + scenes = queryScene(ctx, t, sqb, nil, &findFilter) assert.Len(t, scenes, 1) secondID := scenes[0].ID @@ -1378,7 +3571,7 @@ func TestSceneQueryPagination(t *testing.T) { perPage = 2 page = 1 - scenes = queryScene(t, sqb, nil, &findFilter) + scenes = queryScene(ctx, t, sqb, nil, &findFilter) assert.Len(t, scenes, 2) assert.Equal(t, firstID, scenes[0].ID) assert.Equal(t, secondID, scenes[1].ID) @@ -1407,21 +3600,21 @@ func TestSceneQueryTagCount(t *testing.T) { } func verifyScenesTagCount(t *testing.T, tagCountCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error 
{ - sqb := r.Scene() + withTxn(func(ctx context.Context) error { + sqb := db.Scene sceneFilter := models.SceneFilterType{ TagCount: &tagCountCriterion, } - scenes := queryScene(t, sqb, &sceneFilter, nil) + scenes := queryScene(ctx, t, sqb, &sceneFilter, nil) assert.Greater(t, len(scenes), 0) for _, scene := range scenes { - ids, err := sqb.GetTagIDs(scene.ID) - if err != nil { - return err + if err := scene.LoadTagIDs(ctx, sqb); err != nil { + t.Errorf("scene.LoadTagIDs() error = %v", err) + return nil } - verifyInt(t, len(ids), tagCountCriterion) + verifyInt(t, len(scene.TagIDs.List()), tagCountCriterion) } return nil @@ -1448,21 +3641,22 @@ func TestSceneQueryPerformerCount(t *testing.T) { } func verifyScenesPerformerCount(t *testing.T, performerCountCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - sqb := r.Scene() + withTxn(func(ctx context.Context) error { + sqb := db.Scene sceneFilter := models.SceneFilterType{ PerformerCount: &performerCountCriterion, } - scenes := queryScene(t, sqb, &sceneFilter, nil) + scenes := queryScene(ctx, t, sqb, &sceneFilter, nil) assert.Greater(t, len(scenes), 0) for _, scene := range scenes { - ids, err := sqb.GetPerformerIDs(scene.ID) - if err != nil { - return err + if err := scene.LoadPerformerIDs(ctx, sqb); err != nil { + t.Errorf("scene.LoadPerformerIDs() error = %v", err) + return nil } - verifyInt(t, len(ids), performerCountCriterion) + + verifyInt(t, len(scene.PerformerIDs.List()), performerCountCriterion) } return nil @@ -1470,10 +3664,10 @@ func verifyScenesPerformerCount(t *testing.T, performerCountCriterion models.Int } func TestSceneCountByTagID(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Scene() + withTxn(func(ctx context.Context) error { + sqb := db.Scene - sceneCount, err := sqb.CountByTagID(tagIDs[tagIdxWithScene]) + sceneCount, err := sqb.CountByTagID(ctx, tagIDs[tagIdxWithScene]) if err != nil { t.Errorf("error calling CountByTagID: %s", err.Error()) @@ -1481,7 +3675,7 @@ func TestSceneCountByTagID(t *testing.T) { assert.Equal(t, 1, sceneCount) - sceneCount, err = sqb.CountByTagID(0) + sceneCount, err = sqb.CountByTagID(ctx, 0) if err != nil { t.Errorf("error calling CountByTagID: %s", err.Error()) @@ -1494,10 +3688,10 @@ func TestSceneCountByTagID(t *testing.T) { } func TestSceneCountByMovieID(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Scene() + withTxn(func(ctx context.Context) error { + sqb := db.Scene - sceneCount, err := sqb.CountByMovieID(movieIDs[movieIdxWithScene]) + sceneCount, err := sqb.CountByMovieID(ctx, movieIDs[movieIdxWithScene]) if err != nil { t.Errorf("error calling CountByMovieID: %s", err.Error()) @@ -1505,7 +3699,7 @@ func TestSceneCountByMovieID(t *testing.T) { assert.Equal(t, 1, sceneCount) - sceneCount, err = sqb.CountByMovieID(0) + sceneCount, err = sqb.CountByMovieID(ctx, 0) if err != nil { t.Errorf("error calling CountByMovieID: %s", err.Error()) @@ -1518,10 +3712,10 @@ func TestSceneCountByMovieID(t *testing.T) { } func TestSceneCountByStudioID(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Scene() + withTxn(func(ctx context.Context) error { + sqb := db.Scene - sceneCount, err := sqb.CountByStudioID(studioIDs[studioIdxWithScene]) + sceneCount, err := sqb.CountByStudioID(ctx, studioIDs[studioIdxWithScene]) if err != nil { t.Errorf("error calling CountByStudioID: %s", err.Error()) @@ -1529,7 +3723,7 @@ func TestSceneCountByStudioID(t *testing.T) { assert.Equal(t, 1, sceneCount) - sceneCount, err = 
sqb.CountByStudioID(0) + sceneCount, err = sqb.CountByStudioID(ctx, 0) if err != nil { t.Errorf("error calling CountByStudioID: %s", err.Error()) @@ -1542,10 +3736,10 @@ func TestSceneCountByStudioID(t *testing.T) { } func TestFindByMovieID(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Scene() + withTxn(func(ctx context.Context) error { + sqb := db.Scene - scenes, err := sqb.FindByMovieID(movieIDs[movieIdxWithScene]) + scenes, err := sqb.FindByMovieID(ctx, movieIDs[movieIdxWithScene]) if err != nil { t.Errorf("error calling FindByMovieID: %s", err.Error()) @@ -1554,7 +3748,7 @@ func TestFindByMovieID(t *testing.T) { assert.Len(t, scenes, 1) assert.Equal(t, sceneIDs[sceneIdxWithMovie], scenes[0].ID) - scenes, err = sqb.FindByMovieID(0) + scenes, err = sqb.FindByMovieID(ctx, 0) if err != nil { t.Errorf("error calling FindByMovieID: %s", err.Error()) @@ -1567,10 +3761,10 @@ func TestFindByMovieID(t *testing.T) { } func TestFindByPerformerID(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Scene() + withTxn(func(ctx context.Context) error { + sqb := db.Scene - scenes, err := sqb.FindByPerformerID(performerIDs[performerIdxWithScene]) + scenes, err := sqb.FindByPerformerID(ctx, performerIDs[performerIdxWithScene]) if err != nil { t.Errorf("error calling FindByPerformerID: %s", err.Error()) @@ -1579,7 +3773,7 @@ func TestFindByPerformerID(t *testing.T) { assert.Len(t, scenes, 1) assert.Equal(t, sceneIDs[sceneIdxWithPerformer], scenes[0].ID) - scenes, err = sqb.FindByPerformerID(0) + scenes, err = sqb.FindByPerformerID(ctx, 0) if err != nil { t.Errorf("error calling FindByPerformerID: %s", err.Error()) @@ -1592,35 +3786,25 @@ func TestFindByPerformerID(t *testing.T) { } func TestSceneUpdateSceneCover(t *testing.T) { - if err := withTxn(func(r models.Repository) error { - qb := r.Scene() + if err := withTxn(func(ctx context.Context) error { + qb := db.Scene - // create performer to test against - const name = "TestSceneUpdateSceneCover" - scene := models.Scene{ - Path: name, - Checksum: sql.NullString{String: md5.FromString(name), Valid: true}, - } - created, err := qb.Create(scene) - if err != nil { - return fmt.Errorf("Error creating scene: %s", err.Error()) - } + sceneID := sceneIDs[sceneIdxWithGallery] image := []byte("image") - err = qb.UpdateCover(created.ID, image) - if err != nil { + if err := qb.UpdateCover(ctx, sceneID, image); err != nil { return fmt.Errorf("Error updating scene cover: %s", err.Error()) } // ensure image set - storedImage, err := qb.GetCover(created.ID) + storedImage, err := qb.GetCover(ctx, sceneID) if err != nil { return fmt.Errorf("Error getting image: %s", err.Error()) } assert.Equal(t, storedImage, image) // set nil image - err = qb.UpdateCover(created.ID, nil) + err = qb.UpdateCover(ctx, sceneID, nil) if err == nil { return fmt.Errorf("Expected error setting nil image") } @@ -1632,33 +3816,22 @@ func TestSceneUpdateSceneCover(t *testing.T) { } func TestSceneDestroySceneCover(t *testing.T) { - if err := withTxn(func(r models.Repository) error { - qb := r.Scene() + if err := withTxn(func(ctx context.Context) error { + qb := db.Scene - // create performer to test against - const name = "TestSceneDestroySceneCover" - scene := models.Scene{ - Path: name, - Checksum: sql.NullString{String: md5.FromString(name), Valid: true}, - } - created, err := qb.Create(scene) - if err != nil { - return fmt.Errorf("Error creating scene: %s", err.Error()) - } + sceneID := sceneIDs[sceneIdxWithGallery] image := []byte("image") - err = 
qb.UpdateCover(created.ID, image) - if err != nil { + if err := qb.UpdateCover(ctx, sceneID, image); err != nil { return fmt.Errorf("Error updating scene image: %s", err.Error()) } - err = qb.DestroyCover(created.ID) - if err != nil { + if err := qb.DestroyCover(ctx, sceneID); err != nil { return fmt.Errorf("Error destroying scene cover: %s", err.Error()) } // image should be nil - storedImage, err := qb.GetCover(created.ID) + storedImage, err := qb.GetCover(ctx, sceneID) if err != nil { return fmt.Errorf("Error getting image: %s", err.Error()) } @@ -1671,30 +3844,84 @@ func TestSceneDestroySceneCover(t *testing.T) { } func TestSceneStashIDs(t *testing.T) { - if err := withTxn(func(r models.Repository) error { - qb := r.Scene() + if err := withTxn(func(ctx context.Context) error { + qb := db.Scene // create scene to test against const name = "TestSceneStashIDs" - scene := models.Scene{ - Path: name, - Checksum: sql.NullString{String: md5.FromString(name), Valid: true}, + scene := &models.Scene{ + Title: name, } - created, err := qb.Create(scene) - if err != nil { + if err := qb.Create(ctx, scene, nil); err != nil { return fmt.Errorf("Error creating scene: %s", err.Error()) } - testStashIDReaderWriter(t, qb, created.ID) + if err := scene.LoadStashIDs(ctx, qb); err != nil { + return err + } + + testSceneStashIDs(ctx, t, scene) return nil }); err != nil { t.Error(err.Error()) } } +func testSceneStashIDs(ctx context.Context, t *testing.T, s *models.Scene) { + // ensure no stash IDs to begin with + assert.Len(t, s.StashIDs.List(), 0) + + // add stash ids + const stashIDStr = "stashID" + const endpoint = "endpoint" + stashID := models.StashID{ + StashID: stashIDStr, + Endpoint: endpoint, + } + + qb := db.Scene + + // update stash ids and ensure was updated + var err error + s, err = qb.UpdatePartial(ctx, s.ID, models.ScenePartial{ + StashIDs: &models.UpdateStashIDs{ + StashIDs: []models.StashID{stashID}, + Mode: models.RelationshipUpdateModeSet, + }, + }) + if err != nil { + t.Error(err.Error()) + } + + if err := s.LoadStashIDs(ctx, qb); err != nil { + t.Error(err.Error()) + return + } + + assert.Equal(t, []models.StashID{stashID}, s.StashIDs.List()) + + // remove stash ids and ensure was updated + s, err = qb.UpdatePartial(ctx, s.ID, models.ScenePartial{ + StashIDs: &models.UpdateStashIDs{ + StashIDs: []models.StashID{stashID}, + Mode: models.RelationshipUpdateModeRemove, + }, + }) + if err != nil { + t.Error(err.Error()) + } + + if err := s.LoadStashIDs(ctx, qb); err != nil { + t.Error(err.Error()) + return + } + + assert.Len(t, s.StashIDs.List(), 0) +} + func TestSceneQueryQTrim(t *testing.T) { - if err := withTxn(func(r models.Repository) error { - qb := r.Scene() + if err := withTxn(func(ctx context.Context) error { + qb := db.Scene expectedID := sceneIDs[sceneIdxWithSpacedName] @@ -1717,7 +3944,7 @@ func TestSceneQueryQTrim(t *testing.T) { f := models.FindFilterType{ Q: &tst.query, } - scenes := queryScene(t, qb, nil, &f) + scenes := queryScene(ctx, t, qb, nil, &f) assert.Len(t, scenes, tst.count) if len(scenes) > 0 { @@ -1726,7 +3953,7 @@ func TestSceneQueryQTrim(t *testing.T) { } findFilter := models.FindFilterType{} - scenes := queryScene(t, qb, nil, &findFilter) + scenes := queryScene(ctx, t, qb, nil, &findFilter) assert.NotEqual(t, 0, len(scenes)) return nil @@ -1735,12 +3962,48 @@ func TestSceneQueryQTrim(t *testing.T) { } } -// TODO Update -// TODO IncrementOCounter -// TODO DecrementOCounter -// TODO ResetOCounter -// TODO Destroy -// TODO FindByChecksum +func TestSceneStore_All(t 
*testing.T) { + qb := db.Scene + + withRollbackTxn(func(ctx context.Context) error { + got, err := qb.All(ctx) + if err != nil { + t.Errorf("SceneStore.All() error = %v", err) + return nil + } + + // it's possible that other tests have created scenes + assert.GreaterOrEqual(t, len(got), len(sceneIDs)) + + return nil + }) +} + +func TestSceneStore_FindDuplicates(t *testing.T) { + qb := db.Scene + + withRollbackTxn(func(ctx context.Context) error { + distance := 0 + got, err := qb.FindDuplicates(ctx, distance) + if err != nil { + t.Errorf("SceneStore.FindDuplicates() error = %v", err) + return nil + } + + assert.Len(t, got, dupeScenePhashes) + + distance = 1 + got, err = qb.FindDuplicates(ctx, distance) + if err != nil { + t.Errorf("SceneStore.FindDuplicates() error = %v", err) + return nil + } + + assert.Len(t, got, dupeScenePhashes) + + return nil + }) +} + // TODO Count // TODO SizeCount -// TODO All diff --git a/pkg/sqlite/scraped_item.go b/pkg/sqlite/scraped_item.go index 1eafc98a5..1b8216dab 100644 --- a/pkg/sqlite/scraped_item.go +++ b/pkg/sqlite/scraped_item.go @@ -1,6 +1,7 @@ package sqlite import ( + "context" "database/sql" "errors" @@ -13,41 +14,38 @@ type scrapedItemQueryBuilder struct { repository } -func NewScrapedItemReaderWriter(tx dbi) *scrapedItemQueryBuilder { - return &scrapedItemQueryBuilder{ - repository{ - tx: tx, - tableName: scrapedItemTable, - idColumn: idColumn, - }, - } +var ScrapedItemReaderWriter = &scrapedItemQueryBuilder{ + repository{ + tableName: scrapedItemTable, + idColumn: idColumn, + }, } -func (qb *scrapedItemQueryBuilder) Create(newObject models.ScrapedItem) (*models.ScrapedItem, error) { +func (qb *scrapedItemQueryBuilder) Create(ctx context.Context, newObject models.ScrapedItem) (*models.ScrapedItem, error) { var ret models.ScrapedItem - if err := qb.insertObject(newObject, &ret); err != nil { + if err := qb.insertObject(ctx, newObject, &ret); err != nil { return nil, err } return &ret, nil } -func (qb *scrapedItemQueryBuilder) Update(updatedObject models.ScrapedItem) (*models.ScrapedItem, error) { +func (qb *scrapedItemQueryBuilder) Update(ctx context.Context, updatedObject models.ScrapedItem) (*models.ScrapedItem, error) { const partial = false - if err := qb.update(updatedObject.ID, updatedObject, partial); err != nil { + if err := qb.update(ctx, updatedObject.ID, updatedObject, partial); err != nil { return nil, err } - return qb.find(updatedObject.ID) + return qb.find(ctx, updatedObject.ID) } -func (qb *scrapedItemQueryBuilder) Find(id int) (*models.ScrapedItem, error) { - return qb.find(id) +func (qb *scrapedItemQueryBuilder) Find(ctx context.Context, id int) (*models.ScrapedItem, error) { + return qb.find(ctx, id) } -func (qb *scrapedItemQueryBuilder) find(id int) (*models.ScrapedItem, error) { +func (qb *scrapedItemQueryBuilder) find(ctx context.Context, id int) (*models.ScrapedItem, error) { var ret models.ScrapedItem - if err := qb.get(id, &ret); err != nil { + if err := qb.getByID(ctx, id, &ret); err != nil { if errors.Is(err, sql.ErrNoRows) { return nil, nil } @@ -56,8 +54,8 @@ func (qb *scrapedItemQueryBuilder) find(id int) (*models.ScrapedItem, error) { return &ret, nil } -func (qb *scrapedItemQueryBuilder) All() ([]*models.ScrapedItem, error) { - return qb.queryScrapedItems(selectAll("scraped_items")+qb.getScrapedItemsSort(nil), nil) +func (qb *scrapedItemQueryBuilder) All(ctx context.Context) ([]*models.ScrapedItem, error) { + return qb.queryScrapedItems(ctx, selectAll("scraped_items")+qb.getScrapedItemsSort(nil), nil) } func (qb 
*scrapedItemQueryBuilder) getScrapedItemsSort(findFilter *models.FindFilterType) string { @@ -73,9 +71,9 @@ func (qb *scrapedItemQueryBuilder) getScrapedItemsSort(findFilter *models.FindFi return getSort(sort, direction, "scraped_items") } -func (qb *scrapedItemQueryBuilder) queryScrapedItems(query string, args []interface{}) ([]*models.ScrapedItem, error) { +func (qb *scrapedItemQueryBuilder) queryScrapedItems(ctx context.Context, query string, args []interface{}) ([]*models.ScrapedItem, error) { var ret models.ScrapedItems - if err := qb.query(query, args, &ret); err != nil { + if err := qb.query(ctx, query, args, &ret); err != nil { return nil, err } diff --git a/pkg/sqlite/setup_test.go b/pkg/sqlite/setup_test.go index e07d8aebe..92412ac89 100644 --- a/pkg/sqlite/setup_test.go +++ b/pkg/sqlite/setup_test.go @@ -9,23 +9,51 @@ import ( "errors" "fmt" "os" + "path/filepath" "strconv" "testing" "time" - "github.com/stashapp/stash/pkg/database" - "github.com/stashapp/stash/pkg/gallery" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/hash/md5" "github.com/stashapp/stash/pkg/models" - "github.com/stashapp/stash/pkg/scene" "github.com/stashapp/stash/pkg/sliceutil/intslice" "github.com/stashapp/stash/pkg/sqlite" + "github.com/stashapp/stash/pkg/txn" + + // necessary to register custom migrations + _ "github.com/stashapp/stash/pkg/sqlite/migrations" ) const ( spacedSceneTitle = "zzz yyy xxx" ) +const ( + folderIdxWithSubFolder = iota + folderIdxWithParentFolder + folderIdxWithFiles + folderIdxInZip + + folderIdxForObjectFiles + folderIdxWithImageFiles + folderIdxWithGalleryFiles + folderIdxWithSceneFiles + + totalFolders +) + +const ( + fileIdxZip = iota + fileIdxInZip + + fileIdxStartVideoFiles + fileIdxStartImageFiles + fileIdxStartGalleryFiles + + totalFiles +) + const ( sceneIdxWithMovie = iota sceneIdxWithGallery @@ -45,12 +73,15 @@ const ( sceneIdxWithSpacedName sceneIdxWithStudioPerformer sceneIdxWithGrandChildStudio + sceneIdxMissingPhash // new indexes above lastSceneIdx totalScenes = lastSceneIdx + 3 ) +const dupeScenePhashes = 2 + const ( imageIdxWithGallery = iota imageIdx1WithGallery @@ -66,7 +97,7 @@ const ( imageIdx1WithStudio imageIdx2WithStudio imageIdxWithStudioPerformer - imageIdxInZip // TODO - not implemented + imageIdxInZip imageIdxWithPerformerTag imageIdxWithPerformerTwoTags imageIdxWithGrandChildStudio @@ -133,6 +164,7 @@ const ( galleryIdxWithPerformerTwoTags galleryIdxWithStudioPerformer galleryIdxWithGrandChildStudio + galleryIdxWithoutFile // new indexes above lastGalleryIdx @@ -225,6 +257,12 @@ const ( ) var ( + folderIDs []file.FolderID + fileIDs []file.ID + sceneFileIDs []file.ID + imageFileIDs []file.ID + galleryFileIDs []file.ID + sceneIDs []int imageIDs []int performerIDs []int @@ -235,6 +273,8 @@ var ( markerIDs []int savedFilterIDs []int + folderPaths []string + tagNames []string studioNames []string movieNames []string @@ -246,39 +286,75 @@ type idAssociation struct { second int } +type linkMap map[int][]int + +func (m linkMap) reverseLookup(idx int) []int { + var result []int + + for k, v := range m { + for _, vv := range v { + if vv == idx { + result = append(result, k) + } + } + } + + return result +} + var ( - sceneTagLinks = [][2]int{ - {sceneIdxWithTag, tagIdxWithScene}, - {sceneIdxWithTwoTags, tagIdx1WithScene}, - {sceneIdxWithTwoTags, tagIdx2WithScene}, - {sceneIdxWithMarkerAndTag, tagIdx3WithScene}, + folderParentFolders = map[int]int{ + folderIdxWithParentFolder: folderIdxWithSubFolder, + folderIdxWithSceneFiles: 
folderIdxForObjectFiles, + folderIdxWithImageFiles: folderIdxForObjectFiles, + folderIdxWithGalleryFiles: folderIdxForObjectFiles, } - scenePerformerLinks = [][2]int{ - {sceneIdxWithPerformer, performerIdxWithScene}, - {sceneIdxWithTwoPerformers, performerIdx1WithScene}, - {sceneIdxWithTwoPerformers, performerIdx2WithScene}, - {sceneIdxWithPerformerTag, performerIdxWithTag}, - {sceneIdxWithPerformerTwoTags, performerIdxWithTwoTags}, - {sceneIdx1WithPerformer, performerIdxWithTwoScenes}, - {sceneIdx2WithPerformer, performerIdxWithTwoScenes}, - {sceneIdxWithStudioPerformer, performerIdxWithSceneStudio}, + fileFolders = map[int]int{ + fileIdxZip: folderIdxWithFiles, + fileIdxInZip: folderIdxInZip, } - sceneGalleryLinks = [][2]int{ - {sceneIdxWithGallery, galleryIdxWithScene}, + folderZipFiles = map[int]int{ + folderIdxInZip: fileIdxZip, } - sceneMovieLinks = [][2]int{ - {sceneIdxWithMovie, movieIdxWithScene}, + fileZipFiles = map[int]int{ + fileIdxInZip: fileIdxZip, + } +) + +var ( + sceneTags = linkMap{ + sceneIdxWithTag: {tagIdxWithScene}, + sceneIdxWithTwoTags: {tagIdx1WithScene, tagIdx2WithScene}, + sceneIdxWithMarkerAndTag: {tagIdx3WithScene}, } - sceneStudioLinks = [][2]int{ - {sceneIdxWithStudio, studioIdxWithScene}, - {sceneIdx1WithStudio, studioIdxWithTwoScenes}, - {sceneIdx2WithStudio, studioIdxWithTwoScenes}, - {sceneIdxWithStudioPerformer, studioIdxWithScenePerformer}, - {sceneIdxWithGrandChildStudio, studioIdxWithGrandParent}, + scenePerformers = linkMap{ + sceneIdxWithPerformer: {performerIdxWithScene}, + sceneIdxWithTwoPerformers: {performerIdx1WithScene, performerIdx2WithScene}, + sceneIdxWithPerformerTag: {performerIdxWithTag}, + sceneIdxWithPerformerTwoTags: {performerIdxWithTwoTags}, + sceneIdx1WithPerformer: {performerIdxWithTwoScenes}, + sceneIdx2WithPerformer: {performerIdxWithTwoScenes}, + sceneIdxWithStudioPerformer: {performerIdxWithSceneStudio}, + } + + sceneGalleries = linkMap{ + sceneIdxWithGallery: {galleryIdxWithScene}, + } + + sceneMovies = linkMap{ + sceneIdxWithMovie: {movieIdxWithScene}, + } + + sceneStudios = map[int]int{ + sceneIdxWithStudio: studioIdxWithScene, + sceneIdx1WithStudio: studioIdxWithTwoScenes, + sceneIdx2WithStudio: studioIdxWithTwoScenes, + sceneIdxWithStudioPerformer: studioIdxWithScenePerformer, + sceneIdxWithGrandChildStudio: studioIdxWithGrandParent, } ) @@ -298,61 +374,56 @@ var ( ) var ( - imageGalleryLinks = [][2]int{ - {imageIdxWithGallery, galleryIdxWithImage}, - {imageIdx1WithGallery, galleryIdxWithTwoImages}, - {imageIdx2WithGallery, galleryIdxWithTwoImages}, - {imageIdxWithTwoGalleries, galleryIdx1WithImage}, - {imageIdxWithTwoGalleries, galleryIdx2WithImage}, + imageGalleries = linkMap{ + imageIdxWithGallery: {galleryIdxWithImage}, + imageIdx1WithGallery: {galleryIdxWithTwoImages}, + imageIdx2WithGallery: {galleryIdxWithTwoImages}, + imageIdxWithTwoGalleries: {galleryIdx1WithImage, galleryIdx2WithImage}, } - imageStudioLinks = [][2]int{ - {imageIdxWithStudio, studioIdxWithImage}, - {imageIdx1WithStudio, studioIdxWithTwoImages}, - {imageIdx2WithStudio, studioIdxWithTwoImages}, - {imageIdxWithStudioPerformer, studioIdxWithImagePerformer}, - {imageIdxWithGrandChildStudio, studioIdxWithGrandParent}, + imageStudios = map[int]int{ + imageIdxWithStudio: studioIdxWithImage, + imageIdx1WithStudio: studioIdxWithTwoImages, + imageIdx2WithStudio: studioIdxWithTwoImages, + imageIdxWithStudioPerformer: studioIdxWithImagePerformer, + imageIdxWithGrandChildStudio: studioIdxWithGrandParent, } - imageTagLinks = [][2]int{ - {imageIdxWithTag, 
tagIdxWithImage}, - {imageIdxWithTwoTags, tagIdx1WithImage}, - {imageIdxWithTwoTags, tagIdx2WithImage}, + imageTags = linkMap{ + imageIdxWithTag: {tagIdxWithImage}, + imageIdxWithTwoTags: {tagIdx1WithImage, tagIdx2WithImage}, } - imagePerformerLinks = [][2]int{ - {imageIdxWithPerformer, performerIdxWithImage}, - {imageIdxWithTwoPerformers, performerIdx1WithImage}, - {imageIdxWithTwoPerformers, performerIdx2WithImage}, - {imageIdxWithPerformerTag, performerIdxWithTag}, - {imageIdxWithPerformerTwoTags, performerIdxWithTwoTags}, - {imageIdx1WithPerformer, performerIdxWithTwoImages}, - {imageIdx2WithPerformer, performerIdxWithTwoImages}, - {imageIdxWithStudioPerformer, performerIdxWithImageStudio}, + imagePerformers = linkMap{ + imageIdxWithPerformer: {performerIdxWithImage}, + imageIdxWithTwoPerformers: {performerIdx1WithImage, performerIdx2WithImage}, + imageIdxWithPerformerTag: {performerIdxWithTag}, + imageIdxWithPerformerTwoTags: {performerIdxWithTwoTags}, + imageIdx1WithPerformer: {performerIdxWithTwoImages}, + imageIdx2WithPerformer: {performerIdxWithTwoImages}, + imageIdxWithStudioPerformer: {performerIdxWithImageStudio}, } ) var ( - galleryPerformerLinks = [][2]int{ - {galleryIdxWithPerformer, performerIdxWithGallery}, - {galleryIdxWithTwoPerformers, performerIdx1WithGallery}, - {galleryIdxWithTwoPerformers, performerIdx2WithGallery}, - {galleryIdxWithPerformerTag, performerIdxWithTag}, - {galleryIdxWithPerformerTwoTags, performerIdxWithTwoTags}, - {galleryIdx1WithPerformer, performerIdxWithTwoGalleries}, - {galleryIdx2WithPerformer, performerIdxWithTwoGalleries}, - {galleryIdxWithStudioPerformer, performerIdxWithGalleryStudio}, + galleryPerformers = linkMap{ + galleryIdxWithPerformer: {performerIdxWithGallery}, + galleryIdxWithTwoPerformers: {performerIdx1WithGallery, performerIdx2WithGallery}, + galleryIdxWithPerformerTag: {performerIdxWithTag}, + galleryIdxWithPerformerTwoTags: {performerIdxWithTwoTags}, + galleryIdx1WithPerformer: {performerIdxWithTwoGalleries}, + galleryIdx2WithPerformer: {performerIdxWithTwoGalleries}, + galleryIdxWithStudioPerformer: {performerIdxWithGalleryStudio}, } - galleryStudioLinks = [][2]int{ - {galleryIdxWithStudio, studioIdxWithGallery}, - {galleryIdx1WithStudio, studioIdxWithTwoGalleries}, - {galleryIdx2WithStudio, studioIdxWithTwoGalleries}, - {galleryIdxWithStudioPerformer, studioIdxWithGalleryPerformer}, - {galleryIdxWithGrandChildStudio, studioIdxWithGrandParent}, + galleryStudios = map[int]int{ + galleryIdxWithStudio: studioIdxWithGallery, + galleryIdx1WithStudio: studioIdxWithTwoGalleries, + galleryIdx2WithStudio: studioIdxWithTwoGalleries, + galleryIdxWithStudioPerformer: studioIdxWithGalleryPerformer, + galleryIdxWithGrandChildStudio: studioIdxWithGrandParent, } - galleryTagLinks = [][2]int{ - {galleryIdxWithTag, tagIdxWithGallery}, - {galleryIdxWithTwoTags, tagIdx1WithGallery}, - {galleryIdxWithTwoTags, tagIdx2WithGallery}, + galleryTags = linkMap{ + galleryIdxWithTag: {tagIdxWithGallery}, + galleryIdxWithTwoTags: {tagIdx1WithGallery, tagIdx2WithGallery}, } ) @@ -386,28 +457,47 @@ var ( } ) +func indexesToIDs(ids []int, indexes []int) []int { + ret := make([]int, len(indexes)) + for i, idx := range indexes { + ret[i] = ids[idx] + } + + return ret +} + +var db *sqlite.Database + func TestMain(m *testing.M) { ret := runTests(m) os.Exit(ret) } -func withTxn(f func(r models.Repository) error) error { - t := sqlite.NewTransactionManager() - return t.WithTxn(context.TODO(), f) +func withTxn(f func(ctx context.Context) error) error { + return 
txn.WithTxn(context.Background(), db, f) } -func withRollbackTxn(f func(r models.Repository) error) error { +func withRollbackTxn(f func(ctx context.Context) error) error { var ret error - withTxn(func(repo models.Repository) error { - ret = f(repo) + withTxn(func(ctx context.Context) error { + ret = f(ctx) return errors.New("fake error for rollback") }) return ret } +func runWithRollbackTxn(t *testing.T, name string, f func(t *testing.T, ctx context.Context)) { + withRollbackTxn(func(ctx context.Context) error { + t.Run(name, func(t *testing.T) { + f(t, ctx) + }) + return nil + }) +} + func testTeardown(databaseFile string) { - err := database.DB.Close() + err := db.Close() if err != nil { panic(err) @@ -428,7 +518,9 @@ func runTests(m *testing.M) int { f.Close() databaseFile := f.Name() - if err := database.Initialize(databaseFile); err != nil { + db = sqlite.NewDatabase() + + if err := db.Open(databaseFile); err != nil { panic(fmt.Sprintf("Could not initialize database: %s", err.Error())) } @@ -445,109 +537,71 @@ func runTests(m *testing.M) int { } func populateDB() error { - if err := withTxn(func(r models.Repository) error { - if err := createScenes(r.Scene(), totalScenes); err != nil { - return fmt.Errorf("error creating scenes: %s", err.Error()) + if err := withTxn(func(ctx context.Context) error { + if err := createFolders(ctx); err != nil { + return fmt.Errorf("creating folders: %w", err) } - if err := createImages(r.Image(), totalImages); err != nil { - return fmt.Errorf("error creating images: %s", err.Error()) + if err := createFiles(ctx); err != nil { + return fmt.Errorf("creating files: %w", err) } - if err := createGalleries(r.Gallery(), totalGalleries); err != nil { - return fmt.Errorf("error creating galleries: %s", err.Error()) - } + // TODO - link folders to zip files - if err := createMovies(r.Movie(), moviesNameCase, moviesNameNoCase); err != nil { + if err := createMovies(ctx, sqlite.MovieReaderWriter, moviesNameCase, moviesNameNoCase); err != nil { return fmt.Errorf("error creating movies: %s", err.Error()) } - if err := createPerformers(r.Performer(), performersNameCase, performersNameNoCase); err != nil { + if err := createPerformers(ctx, sqlite.PerformerReaderWriter, performersNameCase, performersNameNoCase); err != nil { return fmt.Errorf("error creating performers: %s", err.Error()) } - if err := createTags(r.Tag(), tagsNameCase, tagsNameNoCase); err != nil { + if err := createTags(ctx, sqlite.TagReaderWriter, tagsNameCase, tagsNameNoCase); err != nil { return fmt.Errorf("error creating tags: %s", err.Error()) } - if err := addTagImage(r.Tag(), tagIdxWithCoverImage); err != nil { - return fmt.Errorf("error adding tag image: %s", err.Error()) - } - - if err := createStudios(r.Studio(), studiosNameCase, studiosNameNoCase); err != nil { + if err := createStudios(ctx, sqlite.StudioReaderWriter, studiosNameCase, studiosNameNoCase); err != nil { return fmt.Errorf("error creating studios: %s", err.Error()) } - if err := createSavedFilters(r.SavedFilter(), totalSavedFilters); err != nil { + if err := createGalleries(ctx, totalGalleries); err != nil { + return fmt.Errorf("error creating galleries: %s", err.Error()) + } + + if err := createScenes(ctx, totalScenes); err != nil { + return fmt.Errorf("error creating scenes: %s", err.Error()) + } + + if err := createImages(ctx, totalImages); err != nil { + return fmt.Errorf("error creating images: %s", err.Error()) + } + + if err := addTagImage(ctx, sqlite.TagReaderWriter, tagIdxWithCoverImage); err != nil { + return 
fmt.Errorf("error adding tag image: %s", err.Error()) + } + + if err := createSavedFilters(ctx, sqlite.SavedFilterReaderWriter, totalSavedFilters); err != nil { return fmt.Errorf("error creating saved filters: %s", err.Error()) } - if err := linkPerformerTags(r.Performer()); err != nil { + if err := linkPerformerTags(ctx, sqlite.PerformerReaderWriter); err != nil { return fmt.Errorf("error linking performer tags: %s", err.Error()) } - if err := linkSceneGalleries(r.Scene()); err != nil { - return fmt.Errorf("error linking scenes to galleries: %s", err.Error()) - } - - if err := linkSceneMovies(r.Scene()); err != nil { - return fmt.Errorf("error linking scenes to movies: %s", err.Error()) - } - - if err := linkScenePerformers(r.Scene()); err != nil { - return fmt.Errorf("error linking scene performers: %s", err.Error()) - } - - if err := linkSceneTags(r.Scene()); err != nil { - return fmt.Errorf("error linking scene tags: %s", err.Error()) - } - - if err := linkSceneStudios(r.Scene()); err != nil { - return fmt.Errorf("error linking scene studios: %s", err.Error()) - } - - if err := linkImageGalleries(r.Gallery()); err != nil { - return fmt.Errorf("error linking gallery images: %s", err.Error()) - } - - if err := linkImagePerformers(r.Image()); err != nil { - return fmt.Errorf("error linking image performers: %s", err.Error()) - } - - if err := linkImageTags(r.Image()); err != nil { - return fmt.Errorf("error linking image tags: %s", err.Error()) - } - - if err := linkImageStudios(r.Image()); err != nil { - return fmt.Errorf("error linking image studio: %s", err.Error()) - } - - if err := linkMovieStudios(r.Movie()); err != nil { + if err := linkMovieStudios(ctx, sqlite.MovieReaderWriter); err != nil { return fmt.Errorf("error linking movie studios: %s", err.Error()) } - if err := linkStudiosParent(r.Studio()); err != nil { + if err := linkStudiosParent(ctx, sqlite.StudioReaderWriter); err != nil { return fmt.Errorf("error linking studios parent: %s", err.Error()) } - if err := linkGalleryPerformers(r.Gallery()); err != nil { - return fmt.Errorf("error linking gallery performers: %s", err.Error()) - } - - if err := linkGalleryTags(r.Gallery()); err != nil { - return fmt.Errorf("error linking gallery tags: %s", err.Error()) - } - - if err := linkGalleryStudios(r.Gallery()); err != nil { - return fmt.Errorf("error linking gallery studios: %s", err.Error()) - } - - if err := linkTagsParent(r.Tag()); err != nil { + if err := linkTagsParent(ctx, sqlite.TagReaderWriter); err != nil { return fmt.Errorf("error linking tags parent: %s", err.Error()) } for _, ms := range markerSpecs { - if err := createMarker(r.SceneMarker(), ms); err != nil { + if err := createMarker(ctx, sqlite.SceneMarkerReaderWriter, ms); err != nil { return fmt.Errorf("error creating scene marker: %s", err.Error()) } } @@ -560,6 +614,158 @@ func populateDB() error { return nil } +func getFolderPath(index int, parentFolderIdx *int) string { + path := getPrefixedStringValue("folder", index, pathField) + + if parentFolderIdx != nil { + return filepath.Join(folderPaths[*parentFolderIdx], path) + } + + return path +} + +func getFolderModTime(index int) time.Time { + return time.Date(2000, 1, (index%10)+1, 0, 0, 0, 0, time.UTC) +} + +func makeFolder(i int) file.Folder { + var folderID *file.FolderID + var folderIdx *int + if pidx, ok := folderParentFolders[i]; ok { + folderIdx = &pidx + v := folderIDs[pidx] + folderID = &v + } + + return file.Folder{ + ParentFolderID: folderID, + DirEntry: file.DirEntry{ + // zip files have to be 
added after creating files + ModTime: getFolderModTime(i), + }, + Path: getFolderPath(i, folderIdx), + } +} + +func createFolders(ctx context.Context) error { + qb := db.Folder + + for i := 0; i < totalFolders; i++ { + folder := makeFolder(i) + + if err := qb.Create(ctx, &folder); err != nil { + return fmt.Errorf("Error creating folder [%d] %v+: %s", i, folder, err.Error()) + } + + folderIDs = append(folderIDs, folder.ID) + folderPaths = append(folderPaths, folder.Path) + } + + return nil +} + +func getFileBaseName(index int) string { + return getPrefixedStringValue("file", index, "basename") +} + +func getFileStringValue(index int, field string) string { + return getPrefixedStringValue("file", index, field) +} + +func getFileModTime(index int) time.Time { + return getFolderModTime(index) +} + +func getFileFingerprints(index int) []file.Fingerprint { + return []file.Fingerprint{ + { + Type: "MD5", + Fingerprint: getPrefixedStringValue("file", index, "md5"), + }, + { + Type: "OSHASH", + Fingerprint: getPrefixedStringValue("file", index, "oshash"), + }, + } +} + +func getFileSize(index int) int64 { + return int64(index) * 10 +} + +func getFileDuration(index int) float64 { + duration := (index % 4) + 1 + duration = duration * 100 + + return float64(duration) + 0.432 +} + +func makeFile(i int) file.File { + folderID := folderIDs[fileFolders[i]] + if folderID == 0 { + folderID = folderIDs[folderIdxWithFiles] + } + + var zipFileID *file.ID + if zipFileIndex, found := fileZipFiles[i]; found { + zipFileID = &fileIDs[zipFileIndex] + } + + var ret file.File + baseFile := &file.BaseFile{ + Basename: getFileBaseName(i), + ParentFolderID: folderID, + DirEntry: file.DirEntry{ + // zip files have to be added after creating files + ModTime: getFileModTime(i), + ZipFileID: zipFileID, + }, + Fingerprints: getFileFingerprints(i), + Size: getFileSize(i), + } + + ret = baseFile + + if i >= fileIdxStartVideoFiles && i < fileIdxStartImageFiles { + ret = &file.VideoFile{ + BaseFile: baseFile, + Format: getFileStringValue(i, "format"), + Width: getWidth(i), + Height: getHeight(i), + Duration: getFileDuration(i), + VideoCodec: getFileStringValue(i, "videoCodec"), + AudioCodec: getFileStringValue(i, "audioCodec"), + FrameRate: getFileDuration(i) * 2, + BitRate: int64(getFileDuration(i)) * 3, + } + } else if i >= fileIdxStartImageFiles && i < fileIdxStartGalleryFiles { + ret = &file.ImageFile{ + BaseFile: baseFile, + Format: getFileStringValue(i, "format"), + Width: getWidth(i), + Height: getHeight(i), + } + } + + return ret +} + +func createFiles(ctx context.Context) error { + qb := db.File + + for i := 0; i < totalFiles; i++ { + file := makeFile(i) + + if err := qb.Create(ctx, file); err != nil { + return fmt.Errorf("Error creating file [%d] %v+: %s", i, file, err.Error()) + } + + fileIDs = append(fileIDs, file.Base().ID) + } + + return nil +} + func getPrefixedStringValue(prefix string, index int, field string) string { return fmt.Sprintf("%s_%04d_%s", prefix, index, field) } @@ -584,8 +790,26 @@ func getSceneStringValue(index int, field string) string { return getPrefixedStringValue("scene", index, field) } -func getSceneNullStringValue(index int, field string) sql.NullString { - return getPrefixedNullStringValue("scene", index, field) +func getScenePhash(index int, field string) int64 { + return int64(index % (totalScenes - dupeScenePhashes) * 1234) +} + +func getSceneStringPtr(index int, field string) *string { + v := getPrefixedStringValue("scene", index, field) + return &v +} + +func 
getSceneNullStringPtr(index int, field string) *string { + return getStringPtrFromNullString(getPrefixedNullStringValue("scene", index, field)) +} + +func getSceneEmptyString(index int, field string) string { + v := getSceneNullStringPtr(index, field) + if v == nil { + return "" + } + + return *v } func getSceneTitle(index int) string { @@ -602,35 +826,60 @@ func getRating(index int) sql.NullInt64 { return sql.NullInt64{Int64: int64(rating), Valid: rating > 0} } +func getIntPtr(r sql.NullInt64) *int { + if !r.Valid { + return nil + } + + v := int(r.Int64) + return &v +} + +func getStringPtrFromNullString(r sql.NullString) *string { + if !r.Valid || r.String == "" { + return nil + } + + v := r.String + return &v +} + +func getStringPtr(r string) *string { + if r == "" { + return nil + } + + return &r +} + +func getEmptyStringFromPtr(v *string) string { + if v == nil { + return "" + } + + return *v +} + func getOCounter(index int) int { return index % 3 } -func getSceneDuration(index int) sql.NullFloat64 { - duration := index % 4 +func getSceneDuration(index int) float64 { + duration := index + 1 duration = duration * 100 - return sql.NullFloat64{ - Float64: float64(duration) + 0.432, - Valid: duration != 0, - } + return float64(duration) + 0.432 } -func getHeight(index int) sql.NullInt64 { - heights := []int64{0, 200, 240, 300, 480, 700, 720, 800, 1080, 1500, 2160, 3000} +func getHeight(index int) int { + heights := []int{200, 240, 300, 480, 700, 720, 800, 1080, 1500, 2160, 3000} height := heights[index%len(heights)] - return sql.NullInt64{ - Int64: height, - Valid: height != 0, - } + return height } -func getWidth(index int) sql.NullInt64 { +func getWidth(index int) int { height := getHeight(index) - return sql.NullInt64{ - Int64: height.Int64 * 2, - Valid: height.Valid, - } + return height * 2 } func getObjectDate(index int) models.SQLiteDate { @@ -642,29 +891,118 @@ func getObjectDate(index int) models.SQLiteDate { } } -func createScenes(sqb models.SceneReaderWriter, n int) error { - for i := 0; i < n; i++ { - scene := models.Scene{ - Path: getSceneStringValue(i, pathField), - Title: sql.NullString{String: getSceneTitle(i), Valid: true}, - Checksum: sql.NullString{String: getSceneStringValue(i, checksumField), Valid: true}, - Details: sql.NullString{String: getSceneStringValue(i, "Details"), Valid: true}, - URL: getSceneNullStringValue(i, urlField), - Rating: getRating(i), - OCounter: getOCounter(i), - Duration: getSceneDuration(i), - Height: getHeight(i), - Width: getWidth(i), - Date: getObjectDate(i), +func getObjectDateObject(index int) *models.Date { + d := getObjectDate(index) + if !d.Valid { + return nil + } + + ret := models.NewDate(d.String) + return &ret +} + +func sceneStashID(i int) models.StashID { + return models.StashID{ + StashID: getSceneStringValue(i, "stashid"), + Endpoint: getSceneStringValue(i, "endpoint"), + } +} + +func getSceneBasename(index int) string { + return getSceneStringValue(index, pathField) +} + +func makeSceneFile(i int) *file.VideoFile { + fp := []file.Fingerprint{ + { + Type: file.FingerprintTypeMD5, + Fingerprint: getSceneStringValue(i, checksumField), + }, + { + Type: file.FingerprintTypeOshash, + Fingerprint: getSceneStringValue(i, "oshash"), + }, + } + + if i != sceneIdxMissingPhash { + fp = append(fp, file.Fingerprint{ + Type: file.FingerprintTypePhash, + Fingerprint: getScenePhash(i, "phash"), + }) + } + + return &file.VideoFile{ + BaseFile: &file.BaseFile{ + Path: getFilePath(folderIdxWithSceneFiles, getSceneBasename(i)), + Basename: 
getSceneBasename(i), + ParentFolderID: folderIDs[folderIdxWithSceneFiles], + Fingerprints: fp, + }, + Duration: getSceneDuration(i), + Height: getHeight(i), + Width: getWidth(i), + } +} + +func makeScene(i int) *models.Scene { + title := getSceneTitle(i) + details := getSceneStringValue(i, "Details") + + var studioID *int + if _, ok := sceneStudios[i]; ok { + v := studioIDs[sceneStudios[i]] + studioID = &v + } + + gids := indexesToIDs(galleryIDs, sceneGalleries[i]) + pids := indexesToIDs(performerIDs, scenePerformers[i]) + tids := indexesToIDs(tagIDs, sceneTags[i]) + + mids := indexesToIDs(movieIDs, sceneMovies[i]) + + movies := make([]models.MoviesScenes, len(mids)) + for i, m := range mids { + movies[i] = models.MoviesScenes{ + MovieID: m, } + } - created, err := sqb.Create(scene) + return &models.Scene{ + Title: title, + Details: details, + URL: getSceneEmptyString(i, urlField), + Rating: getIntPtr(getRating(i)), + OCounter: getOCounter(i), + Date: getObjectDateObject(i), + StudioID: studioID, + GalleryIDs: models.NewRelatedIDs(gids), + PerformerIDs: models.NewRelatedIDs(pids), + TagIDs: models.NewRelatedIDs(tids), + Movies: models.NewRelatedMovies(movies), + StashIDs: models.NewRelatedStashIDs([]models.StashID{ + sceneStashID(i), + }), + } +} - if err != nil { +func createScenes(ctx context.Context, n int) error { + sqb := db.Scene + fqb := db.File + + for i := 0; i < n; i++ { + f := makeSceneFile(i) + if err := fqb.Create(ctx, f); err != nil { + return fmt.Errorf("creating scene file: %w", err) + } + sceneFileIDs = append(sceneFileIDs, f.ID) + + scene := makeScene(i) + + if err := sqb.Create(ctx, scene, []file.ID{f.ID}); err != nil { return fmt.Errorf("Error creating scene %v+: %s", scene, err.Error()) } - sceneIDs = append(sceneIDs, created.ID) + sceneIDs = append(sceneIDs, scene.ID) } return nil @@ -674,34 +1012,78 @@ func getImageStringValue(index int, field string) string { return fmt.Sprintf("image_%04d_%s", index, field) } -func getImagePath(index int) string { - // TODO - currently not working - // if index == imageIdxInZip { - // return image.ZipFilename(zipPath, "image_0001_Path") - // } - +func getImageBasename(index int) string { return getImageStringValue(index, pathField) } -func createImages(qb models.ImageReaderWriter, n int) error { +func makeImageFile(i int) *file.ImageFile { + return &file.ImageFile{ + BaseFile: &file.BaseFile{ + Path: getFilePath(folderIdxWithImageFiles, getImageBasename(i)), + Basename: getImageBasename(i), + ParentFolderID: folderIDs[folderIdxWithImageFiles], + Fingerprints: []file.Fingerprint{ + { + Type: file.FingerprintTypeMD5, + Fingerprint: getImageStringValue(i, checksumField), + }, + }, + }, + Height: getHeight(i), + Width: getWidth(i), + } +} + +func makeImage(i int) *models.Image { + title := getImageStringValue(i, titleField) + var studioID *int + if _, ok := imageStudios[i]; ok { + v := studioIDs[imageStudios[i]] + studioID = &v + } + + gids := indexesToIDs(galleryIDs, imageGalleries[i]) + pids := indexesToIDs(performerIDs, imagePerformers[i]) + tids := indexesToIDs(tagIDs, imageTags[i]) + + return &models.Image{ + Title: title, + Rating: getIntPtr(getRating(i)), + OCounter: getOCounter(i), + StudioID: studioID, + GalleryIDs: models.NewRelatedIDs(gids), + PerformerIDs: models.NewRelatedIDs(pids), + TagIDs: models.NewRelatedIDs(tids), + } +} + +func createImages(ctx context.Context, n int) error { + qb := db.TxnRepository().Image + fqb := db.File + for i := 0; i < n; i++ { - image := models.Image{ - Path: getImagePath(i), - Title: 
sql.NullString{String: getImageStringValue(i, titleField), Valid: true}, - Checksum: getImageStringValue(i, checksumField), - Rating: getRating(i), - OCounter: getOCounter(i), - Height: getHeight(i), - Width: getWidth(i), + f := makeImageFile(i) + if i == imageIdxInZip { + f.ZipFileID = &fileIDs[fileIdxZip] } - created, err := qb.Create(image) + if err := fqb.Create(ctx, f); err != nil { + return fmt.Errorf("creating image file: %w", err) + } + imageFileIDs = append(imageFileIDs, f.ID) + + image := makeImage(i) + + err := qb.Create(ctx, &models.ImageCreateInput{ + Image: image, + FileIDs: []file.ID{f.ID}, + }) if err != nil { return fmt.Errorf("Error creating image %v+: %s", image, err.Error()) } - imageIDs = append(imageIDs, created.ID) + imageIDs = append(imageIDs, image.ID) } return nil @@ -715,24 +1097,83 @@ func getGalleryNullStringValue(index int, field string) sql.NullString { return getPrefixedNullStringValue("gallery", index, field) } -func createGalleries(gqb models.GalleryReaderWriter, n int) error { +func getGalleryNullStringPtr(index int, field string) *string { + return getStringPtr(getPrefixedStringValue("gallery", index, field)) +} + +func getGalleryBasename(index int) string { + return getGalleryStringValue(index, pathField) +} + +func makeGalleryFile(i int) *file.BaseFile { + return &file.BaseFile{ + Path: getFilePath(folderIdxWithGalleryFiles, getGalleryBasename(i)), + Basename: getGalleryBasename(i), + ParentFolderID: folderIDs[folderIdxWithGalleryFiles], + Fingerprints: []file.Fingerprint{ + { + Type: file.FingerprintTypeMD5, + Fingerprint: getGalleryStringValue(i, checksumField), + }, + }, + } +} + +func makeGallery(i int, includeScenes bool) *models.Gallery { + var studioID *int + if _, ok := galleryStudios[i]; ok { + v := studioIDs[galleryStudios[i]] + studioID = &v + } + + pids := indexesToIDs(performerIDs, galleryPerformers[i]) + tids := indexesToIDs(tagIDs, galleryTags[i]) + + ret := &models.Gallery{ + Title: getGalleryStringValue(i, titleField), + URL: getGalleryNullStringValue(i, urlField).String, + Rating: getIntPtr(getRating(i)), + Date: getObjectDateObject(i), + StudioID: studioID, + PerformerIDs: models.NewRelatedIDs(pids), + TagIDs: models.NewRelatedIDs(tids), + } + + if includeScenes { + ret.SceneIDs = models.NewRelatedIDs(indexesToIDs(sceneIDs, sceneGalleries.reverseLookup(i))) + } + + return ret +} + +func createGalleries(ctx context.Context, n int) error { + gqb := db.TxnRepository().Gallery + fqb := db.File + for i := 0; i < n; i++ { - gallery := models.Gallery{ - Path: models.NullString(getGalleryStringValue(i, pathField)), - Title: models.NullString(getGalleryStringValue(i, titleField)), - URL: getGalleryNullStringValue(i, urlField), - Checksum: getGalleryStringValue(i, checksumField), - Rating: getRating(i), - Date: getObjectDate(i), + var fileIDs []file.ID + if i != galleryIdxWithoutFile { + f := makeGalleryFile(i) + if err := fqb.Create(ctx, f); err != nil { + return fmt.Errorf("creating gallery file: %w", err) + } + galleryFileIDs = append(galleryFileIDs, f.ID) + fileIDs = []file.ID{f.ID} + } else { + galleryFileIDs = append(galleryFileIDs, 0) } - created, err := gqb.Create(gallery) + // gallery relationship will be created with galleries + const includeScenes = false + gallery := makeGallery(i, includeScenes) + + err := gqb.Create(ctx, gallery, fileIDs) if err != nil { return fmt.Errorf("Error creating gallery %v+: %s", gallery, err.Error()) } - galleryIDs = append(galleryIDs, created.ID) + galleryIDs = append(galleryIDs, gallery.ID) } return 
nil
@@ -747,7 +1188,7 @@ func getMovieNullStringValue(index int, field string) sql.NullString {
 }
 
 // createMovies creates n movies with plain Name and o movies with camel cased NaMe included
-func createMovies(mqb models.MovieReaderWriter, n int, o int) error {
+func createMovies(ctx context.Context, mqb models.MovieReaderWriter, n int, o int) error {
 	const namePlain = "Name"
 	const nameNoCase = "NaMe"
 
@@ -768,7 +1209,7 @@ func createMovies(mqb models.MovieReaderWriter, n int, o int) error {
 			Checksum: md5.FromString(name),
 		}
 
-		created, err := mqb.Create(movie)
+		created, err := mqb.Create(ctx, movie)
 
 		if err != nil {
 			return fmt.Errorf("Error creating movie [%d] %v+: %s", i, movie, err.Error())
@@ -828,7 +1269,7 @@ func getIgnoreAutoTag(index int) bool {
 }
 
 // createPerformers creates n performers with plain Name and o performers with camel cased NaMe included
-func createPerformers(pqb models.PerformerReaderWriter, n int, o int) error {
+func createPerformers(ctx context.Context, pqb models.PerformerReaderWriter, n int, o int) error {
 	const namePlain = "Name"
 	const nameNoCase = "NaMe"
 
@@ -864,7 +1305,7 @@ func createPerformers(pqb models.PerformerReaderWriter, n int, o int) error {
 			performer.CareerLength = models.NullString(*careerLength)
 		}
 
-		created, err := pqb.Create(performer)
+		created, err := pqb.Create(ctx, performer)
 
 		if err != nil {
 			return fmt.Errorf("Error creating performer %v+: %s", performer, err.Error())
@@ -942,7 +1383,7 @@ func getTagChildCount(id int) int {
 }
 
 // createTags creates n tags with plain Name and o tags with camel cased NaMe included
-func createTags(tqb models.TagReaderWriter, n int, o int) error {
+func createTags(ctx context.Context, tqb models.TagReaderWriter, n int, o int) error {
 	const namePlain = "Name"
 	const nameNoCase = "NaMe"
 
@@ -962,7 +1403,7 @@ func createTags(tqb models.TagReaderWriter, n int, o int) error {
 			IgnoreAutoTag: getIgnoreAutoTag(i),
 		}
 
-		created, err := tqb.Create(tag)
+		created, err := tqb.Create(ctx, tag)
 
 		if err != nil {
 			return fmt.Errorf("Error creating tag %v+: %s", tag, err.Error())
@@ -970,7 +1411,7 @@ func createTags(tqb models.TagReaderWriter, n int, o int) error {
 
 		// add alias
 		alias := getTagStringValue(i, "Alias")
-		if err := tqb.UpdateAliases(created.ID, []string{alias}); err != nil {
+		if err := tqb.UpdateAliases(ctx, created.ID, []string{alias}); err != nil {
 			return fmt.Errorf("error setting tag alias: %s", err.Error())
 		}
 
@@ -989,7 +1430,7 @@ func getStudioNullStringValue(index int, field string) sql.NullString {
 	return getPrefixedNullStringValue("studio", index, field)
 }
 
-func createStudio(sqb models.StudioReaderWriter, name string, parentID *int64) (*models.Studio, error) {
+func createStudio(ctx context.Context, sqb models.StudioReaderWriter, name string, parentID *int64) (*models.Studio, error) {
 	studio := models.Studio{
 		Name: sql.NullString{String: name, Valid: true},
 		Checksum: md5.FromString(name),
@@ -999,11 +1440,11 @@ func createStudio(sqb models.StudioReaderWriter, name string, parentID *int64) (
 		studio.ParentID = sql.NullInt64{Int64: *parentID, Valid: true}
 	}
 
-	return createStudioFromModel(sqb, studio)
+	return createStudioFromModel(ctx, sqb, studio)
 }
 
-func createStudioFromModel(sqb models.StudioReaderWriter, studio models.Studio) (*models.Studio, error) {
-	created, err := sqb.Create(studio)
+func createStudioFromModel(ctx context.Context, sqb models.StudioReaderWriter, studio models.Studio) (*models.Studio, error) {
+	created, err := sqb.Create(ctx, studio)
 
 	if err != nil {
 		return nil, fmt.Errorf("Error creating studio 
%v+: %s", studio, err.Error()) @@ -1013,7 +1454,7 @@ func createStudioFromModel(sqb models.StudioReaderWriter, studio models.Studio) } // createStudios creates n studios with plain Name and o studios with camel cased NaMe included -func createStudios(sqb models.StudioReaderWriter, n int, o int) error { +func createStudios(ctx context.Context, sqb models.StudioReaderWriter, n int, o int) error { const namePlain = "Name" const nameNoCase = "NaMe" @@ -1034,7 +1475,7 @@ func createStudios(sqb models.StudioReaderWriter, n int, o int) error { URL: getStudioNullStringValue(index, urlField), IgnoreAutoTag: getIgnoreAutoTag(i), } - created, err := createStudioFromModel(sqb, studio) + created, err := createStudioFromModel(ctx, sqb, studio) if err != nil { return err @@ -1042,7 +1483,7 @@ func createStudios(sqb models.StudioReaderWriter, n int, o int) error { // add alias alias := getStudioStringValue(i, "Alias") - if err := sqb.UpdateAliases(created.ID, []string{alias}); err != nil { + if err := sqb.UpdateAliases(ctx, created.ID, []string{alias}); err != nil { return fmt.Errorf("error setting studio alias: %s", err.Error()) } @@ -1053,13 +1494,13 @@ func createStudios(sqb models.StudioReaderWriter, n int, o int) error { return nil } -func createMarker(mqb models.SceneMarkerReaderWriter, markerSpec markerSpec) error { +func createMarker(ctx context.Context, mqb models.SceneMarkerReaderWriter, markerSpec markerSpec) error { marker := models.SceneMarker{ SceneID: sql.NullInt64{Int64: int64(sceneIDs[markerSpec.sceneIdx]), Valid: true}, PrimaryTagID: tagIDs[markerSpec.primaryTagIdx], } - created, err := mqb.Create(marker) + created, err := mqb.Create(ctx, marker) if err != nil { return fmt.Errorf("error creating marker %v+: %w", marker, err) @@ -1074,7 +1515,7 @@ func createMarker(mqb models.SceneMarkerReaderWriter, markerSpec markerSpec) err newTagIDs = append(newTagIDs, tagIDs[tagIdx]) } - if err := mqb.UpdateTags(created.ID, newTagIDs); err != nil { + if err := mqb.UpdateTags(ctx, created.ID, newTagIDs); err != nil { return fmt.Errorf("error creating marker/tag join: %w", err) } } @@ -1107,7 +1548,7 @@ func getSavedFilterName(index int) string { return getPrefixedStringValue("savedFilter", index, "Name") } -func createSavedFilters(qb models.SavedFilterReaderWriter, n int) error { +func createSavedFilters(ctx context.Context, qb models.SavedFilterReaderWriter, n int) error { for i := 0; i < n; i++ { savedFilter := models.SavedFilter{ Mode: getSavedFilterMode(i), @@ -1115,7 +1556,7 @@ func createSavedFilters(qb models.SavedFilterReaderWriter, n int) error { Filter: getPrefixedStringValue("savedFilter", i, "Filter"), } - created, err := qb.Create(savedFilter) + created, err := qb.Create(ctx, savedFilter) if err != nil { return fmt.Errorf("Error creating saved filter %v+: %s", savedFilter, err.Error()) @@ -1137,184 +1578,49 @@ func doLinks(links [][2]int, fn func(idx1, idx2 int) error) error { return nil } -func linkPerformerTags(qb models.PerformerReaderWriter) error { +func linkPerformerTags(ctx context.Context, qb models.PerformerReaderWriter) error { return doLinks(performerTagLinks, func(performerIndex, tagIndex int) error { performerID := performerIDs[performerIndex] tagID := tagIDs[tagIndex] - tagIDs, err := qb.GetTagIDs(performerID) + tagIDs, err := qb.GetTagIDs(ctx, performerID) if err != nil { return err } tagIDs = intslice.IntAppendUnique(tagIDs, tagID) - return qb.UpdateTags(performerID, tagIDs) + return qb.UpdateTags(ctx, performerID, tagIDs) }) } -func linkSceneMovies(qb 
models.SceneReaderWriter) error { - return doLinks(sceneMovieLinks, func(sceneIndex, movieIndex int) error { - sceneID := sceneIDs[sceneIndex] - movies, err := qb.GetMovies(sceneID) - if err != nil { - return err - } - - movies = append(movies, models.MoviesScenes{ - MovieID: movieIDs[movieIndex], - SceneID: sceneID, - }) - return qb.UpdateMovies(sceneID, movies) - }) -} - -func linkScenePerformers(qb models.SceneReaderWriter) error { - return doLinks(scenePerformerLinks, func(sceneIndex, performerIndex int) error { - _, err := scene.AddPerformer(qb, sceneIDs[sceneIndex], performerIDs[performerIndex]) - return err - }) -} - -func linkSceneGalleries(qb models.SceneReaderWriter) error { - return doLinks(sceneGalleryLinks, func(sceneIndex, galleryIndex int) error { - _, err := scene.AddGallery(qb, sceneIDs[sceneIndex], galleryIDs[galleryIndex]) - return err - }) -} - -func linkSceneTags(qb models.SceneReaderWriter) error { - return doLinks(sceneTagLinks, func(sceneIndex, tagIndex int) error { - _, err := scene.AddTag(qb, sceneIDs[sceneIndex], tagIDs[tagIndex]) - return err - }) -} - -func linkSceneStudios(sqb models.SceneWriter) error { - return doLinks(sceneStudioLinks, func(sceneIndex, studioIndex int) error { - scene := models.ScenePartial{ - ID: sceneIDs[sceneIndex], - StudioID: &sql.NullInt64{Int64: int64(studioIDs[studioIndex]), Valid: true}, - } - _, err := sqb.Update(scene) - - return err - }) -} - -func linkImageGalleries(gqb models.GalleryReaderWriter) error { - return doLinks(imageGalleryLinks, func(imageIndex, galleryIndex int) error { - return gallery.AddImage(gqb, galleryIDs[galleryIndex], imageIDs[imageIndex]) - }) -} - -func linkImageTags(iqb models.ImageReaderWriter) error { - return doLinks(imageTagLinks, func(imageIndex, tagIndex int) error { - imageID := imageIDs[imageIndex] - tags, err := iqb.GetTagIDs(imageID) - if err != nil { - return err - } - - tags = append(tags, tagIDs[tagIndex]) - - return iqb.UpdateTags(imageID, tags) - }) -} - -func linkImageStudios(qb models.ImageWriter) error { - return doLinks(imageStudioLinks, func(imageIndex, studioIndex int) error { - image := models.ImagePartial{ - ID: imageIDs[imageIndex], - StudioID: &sql.NullInt64{Int64: int64(studioIDs[studioIndex]), Valid: true}, - } - _, err := qb.Update(image) - - return err - }) -} - -func linkImagePerformers(qb models.ImageReaderWriter) error { - return doLinks(imagePerformerLinks, func(imageIndex, performerIndex int) error { - imageID := imageIDs[imageIndex] - performers, err := qb.GetPerformerIDs(imageID) - if err != nil { - return err - } - - performers = append(performers, performerIDs[performerIndex]) - - return qb.UpdatePerformers(imageID, performers) - }) -} - -func linkGalleryPerformers(qb models.GalleryReaderWriter) error { - return doLinks(galleryPerformerLinks, func(galleryIndex, performerIndex int) error { - galleryID := galleryIDs[galleryIndex] - performers, err := qb.GetPerformerIDs(galleryID) - if err != nil { - return err - } - - performers = append(performers, performerIDs[performerIndex]) - - return qb.UpdatePerformers(galleryID, performers) - }) -} - -func linkGalleryStudios(qb models.GalleryReaderWriter) error { - return doLinks(galleryStudioLinks, func(galleryIndex, studioIndex int) error { - gallery := models.GalleryPartial{ - ID: galleryIDs[galleryIndex], - StudioID: &sql.NullInt64{Int64: int64(studioIDs[studioIndex]), Valid: true}, - } - _, err := qb.UpdatePartial(gallery) - - return err - }) -} - -func linkGalleryTags(qb models.GalleryReaderWriter) error { - return 
doLinks(galleryTagLinks, func(galleryIndex, tagIndex int) error { - galleryID := galleryIDs[galleryIndex] - tags, err := qb.GetTagIDs(galleryID) - if err != nil { - return err - } - - tags = append(tags, tagIDs[tagIndex]) - - return qb.UpdateTags(galleryID, tags) - }) -} - -func linkMovieStudios(mqb models.MovieWriter) error { +func linkMovieStudios(ctx context.Context, mqb models.MovieWriter) error { return doLinks(movieStudioLinks, func(movieIndex, studioIndex int) error { movie := models.MoviePartial{ ID: movieIDs[movieIndex], StudioID: &sql.NullInt64{Int64: int64(studioIDs[studioIndex]), Valid: true}, } - _, err := mqb.Update(movie) + _, err := mqb.Update(ctx, movie) return err }) } -func linkStudiosParent(qb models.StudioWriter) error { +func linkStudiosParent(ctx context.Context, qb models.StudioWriter) error { return doLinks(studioParentLinks, func(parentIndex, childIndex int) error { studio := models.StudioPartial{ ID: studioIDs[childIndex], ParentID: &sql.NullInt64{Int64: int64(studioIDs[parentIndex]), Valid: true}, } - _, err := qb.Update(studio) + _, err := qb.Update(ctx, studio) return err }) } -func linkTagsParent(qb models.TagReaderWriter) error { +func linkTagsParent(ctx context.Context, qb models.TagReaderWriter) error { return doLinks(tagParentLinks, func(parentIndex, childIndex int) error { tagID := tagIDs[childIndex] - parentTags, err := qb.FindByChildTagID(tagID) + parentTags, err := qb.FindByChildTagID(ctx, tagID) if err != nil { return err } @@ -1326,10 +1632,10 @@ func linkTagsParent(qb models.TagReaderWriter) error { parentIDs = append(parentIDs, tagIDs[parentIndex]) - return qb.UpdateParentTags(tagID, parentIDs) + return qb.UpdateParentTags(ctx, tagID, parentIDs) }) } -func addTagImage(qb models.TagWriter, tagIndex int) error { - return qb.UpdateImage(tagIDs[tagIndex], models.DefaultTagImage) +func addTagImage(ctx context.Context, qb models.TagWriter, tagIndex int) error { + return qb.UpdateImage(ctx, tagIDs[tagIndex], models.DefaultTagImage) } diff --git a/pkg/sqlite/sql.go b/pkg/sqlite/sql.go index a83612b0b..44920903e 100644 --- a/pkg/sqlite/sql.go +++ b/pkg/sqlite/sql.go @@ -1,6 +1,7 @@ package sqlite import ( + "context" "database/sql" "errors" "fmt" @@ -65,10 +66,6 @@ func getSort(sort string, direction string, tableName string) string { case strings.Compare(sort, "filesize") == 0: colName := getColumn(tableName, "size") return " ORDER BY cast(" + colName + " as integer) " + direction - case strings.Compare(sort, "perceptual_similarity") == 0: - colName := getColumn(tableName, "phash") - secondaryColName := getColumn(tableName, "size") - return " ORDER BY " + colName + " " + direction + ", " + secondaryColName + " DESC" case strings.HasPrefix(sort, randomSeedPrefix): // seed as a parameter from the UI // turn the provided seed into a float @@ -83,20 +80,17 @@ func getSort(sort string, direction string, tableName string) string { return getRandomSort(tableName, direction, randomSortFloat) default: colName := getColumn(tableName, sort) - var additional string - if tableName == "scenes" { - additional = ", bitrate DESC, framerate DESC, scenes.rating DESC, scenes.duration DESC" - } else if tableName == "scene_markers" { - additional = ", scene_markers.scene_id ASC, scene_markers.seconds ASC" + if strings.Contains(sort, ".") { + colName = sort } if strings.Compare(sort, "name") == 0 { - return " ORDER BY " + colName + " COLLATE NOCASE " + direction + additional + return " ORDER BY " + colName + " COLLATE NOCASE " + direction } if strings.Compare(sort, "title") == 
0 { - return " ORDER BY " + colName + " COLLATE NATURAL_CS " + direction + additional + return " ORDER BY " + colName + " COLLATE NATURAL_CS " + direction } - return " ORDER BY " + colName + " " + direction + additional + return " ORDER BY " + colName + " " + direction } } @@ -111,7 +105,7 @@ func getCountSort(primaryTable, joinTable, primaryFK, direction string) string { return fmt.Sprintf(" ORDER BY (SELECT COUNT(*) FROM %s WHERE %s = %s.id) %s", joinTable, primaryFK, primaryTable, getSortDirection(direction)) } -func getSearchBinding(columns []string, q string, not bool) (string, []interface{}) { +func getStringSearchClause(columns []string, q string, not bool) sqlClause { var likeClauses []string var args []interface{} @@ -143,7 +137,7 @@ func getSearchBinding(columns []string, q string, not bool) (string, []interface } likes := strings.Join(likeClauses, binaryType) - return "(" + likes + ")", args + return makeClause("("+likes+")", args...) } func getInBinding(length int) string { @@ -225,8 +219,8 @@ func getCountCriterionClause(primaryTable, joinTable, primaryFK string, criterio return getIntCriterionWhereClause(lhs, criterion) } -func getImage(tx dbi, query string, args ...interface{}) ([]byte, error) { - rows, err := tx.Queryx(query, args...) +func getImage(ctx context.Context, tx dbWrapper, query string, args ...interface{}) ([]byte, error) { + rows, err := tx.Queryx(ctx, query, args...) if err != nil && !errors.Is(err, sql.ErrNoRows) { return nil, err diff --git a/pkg/sqlite/stash_id_test.go b/pkg/sqlite/stash_id_test.go index 0f57bef19..10949b475 100644 --- a/pkg/sqlite/stash_id_test.go +++ b/pkg/sqlite/stash_id_test.go @@ -4,6 +4,7 @@ package sqlite_test import ( + "context" "testing" "github.com/stashapp/stash/pkg/models" @@ -11,16 +12,16 @@ import ( ) type stashIDReaderWriter interface { - GetStashIDs(performerID int) ([]*models.StashID, error) - UpdateStashIDs(performerID int, stashIDs []models.StashID) error + GetStashIDs(ctx context.Context, performerID int) ([]models.StashID, error) + UpdateStashIDs(ctx context.Context, performerID int, stashIDs []models.StashID) error } -func testStashIDReaderWriter(t *testing.T, r stashIDReaderWriter, id int) { +func testStashIDReaderWriter(ctx context.Context, t *testing.T, r stashIDReaderWriter, id int) { // ensure no stash IDs to begin with - testNoStashIDs(t, r, id) + testNoStashIDs(ctx, t, r, id) // ensure GetStashIDs with non-existing also returns none - testNoStashIDs(t, r, -1) + testNoStashIDs(ctx, t, r, -1) // add stash ids const stashIDStr = "stashID" @@ -31,28 +32,28 @@ func testStashIDReaderWriter(t *testing.T, r stashIDReaderWriter, id int) { } // update stash ids and ensure was updated - if err := r.UpdateStashIDs(id, []models.StashID{stashID}); err != nil { + if err := r.UpdateStashIDs(ctx, id, []models.StashID{stashID}); err != nil { t.Error(err.Error()) } - testStashIDs(t, r, id, []*models.StashID{&stashID}) + testStashIDs(ctx, t, r, id, []models.StashID{stashID}) // update non-existing id - should return error - if err := r.UpdateStashIDs(-1, []models.StashID{stashID}); err == nil { + if err := r.UpdateStashIDs(ctx, -1, []models.StashID{stashID}); err == nil { t.Error("expected error when updating non-existing id") } // remove stash ids and ensure was updated - if err := r.UpdateStashIDs(id, []models.StashID{}); err != nil { + if err := r.UpdateStashIDs(ctx, id, []models.StashID{}); err != nil { t.Error(err.Error()) } - testNoStashIDs(t, r, id) + testNoStashIDs(ctx, t, r, id) } -func testNoStashIDs(t *testing.T, r 
stashIDReaderWriter, id int) { +func testNoStashIDs(ctx context.Context, t *testing.T, r stashIDReaderWriter, id int) { t.Helper() - stashIDs, err := r.GetStashIDs(id) + stashIDs, err := r.GetStashIDs(ctx, id) if err != nil { t.Error(err.Error()) return @@ -61,9 +62,9 @@ func testNoStashIDs(t *testing.T, r stashIDReaderWriter, id int) { assert.Len(t, stashIDs, 0) } -func testStashIDs(t *testing.T, r stashIDReaderWriter, id int, expected []*models.StashID) { +func testStashIDs(ctx context.Context, t *testing.T, r stashIDReaderWriter, id int, expected []models.StashID) { t.Helper() - stashIDs, err := r.GetStashIDs(id) + stashIDs, err := r.GetStashIDs(ctx, id) if err != nil { t.Error(err.Error()) return diff --git a/pkg/sqlite/studio.go b/pkg/sqlite/studio.go index cc810fe0c..e7b12c9e3 100644 --- a/pkg/sqlite/studio.go +++ b/pkg/sqlite/studio.go @@ -1,12 +1,16 @@ package sqlite import ( + "context" "database/sql" "errors" "fmt" "strings" + "github.com/doug-martin/goqu/v9" + "github.com/jmoiron/sqlx" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sliceutil/intslice" ) const studioTable = "studios" @@ -18,57 +22,54 @@ type studioQueryBuilder struct { repository } -func NewStudioReaderWriter(tx dbi) *studioQueryBuilder { - return &studioQueryBuilder{ - repository{ - tx: tx, - tableName: studioTable, - idColumn: idColumn, - }, - } +var StudioReaderWriter = &studioQueryBuilder{ + repository{ + tableName: studioTable, + idColumn: idColumn, + }, } -func (qb *studioQueryBuilder) Create(newObject models.Studio) (*models.Studio, error) { +func (qb *studioQueryBuilder) Create(ctx context.Context, newObject models.Studio) (*models.Studio, error) { var ret models.Studio - if err := qb.insertObject(newObject, &ret); err != nil { + if err := qb.insertObject(ctx, newObject, &ret); err != nil { return nil, err } return &ret, nil } -func (qb *studioQueryBuilder) Update(updatedObject models.StudioPartial) (*models.Studio, error) { +func (qb *studioQueryBuilder) Update(ctx context.Context, updatedObject models.StudioPartial) (*models.Studio, error) { const partial = true - if err := qb.update(updatedObject.ID, updatedObject, partial); err != nil { + if err := qb.update(ctx, updatedObject.ID, updatedObject, partial); err != nil { return nil, err } - return qb.Find(updatedObject.ID) + return qb.Find(ctx, updatedObject.ID) } -func (qb *studioQueryBuilder) UpdateFull(updatedObject models.Studio) (*models.Studio, error) { +func (qb *studioQueryBuilder) UpdateFull(ctx context.Context, updatedObject models.Studio) (*models.Studio, error) { const partial = false - if err := qb.update(updatedObject.ID, updatedObject, partial); err != nil { + if err := qb.update(ctx, updatedObject.ID, updatedObject, partial); err != nil { return nil, err } - return qb.Find(updatedObject.ID) + return qb.Find(ctx, updatedObject.ID) } -func (qb *studioQueryBuilder) Destroy(id int) error { +func (qb *studioQueryBuilder) Destroy(ctx context.Context, id int) error { // TODO - set null on foreign key in scraped items // remove studio from scraped items - _, err := qb.tx.Exec("UPDATE scraped_items SET studio_id = null WHERE studio_id = ?", id) + _, err := qb.tx.Exec(ctx, "UPDATE scraped_items SET studio_id = null WHERE studio_id = ?", id) if err != nil { return err } - return qb.destroyExisting([]int{id}) + return qb.destroyExisting(ctx, []int{id}) } -func (qb *studioQueryBuilder) Find(id int) (*models.Studio, error) { +func (qb *studioQueryBuilder) Find(ctx context.Context, id int) (*models.Studio, error) { var ret 
models.Studio - if err := qb.get(id, &ret); err != nil { + if err := qb.getByID(ctx, id, &ret); err != nil { if errors.Is(err, sql.ErrNoRows) { return nil, nil } @@ -77,65 +78,89 @@ func (qb *studioQueryBuilder) Find(id int) (*models.Studio, error) { return &ret, nil } -func (qb *studioQueryBuilder) FindMany(ids []int) ([]*models.Studio, error) { - var studios []*models.Studio - for _, id := range ids { - studio, err := qb.Find(id) - if err != nil { - return nil, err - } - - if studio == nil { - return nil, fmt.Errorf("studio with id %d not found", id) - } - - studios = append(studios, studio) +func (qb *studioQueryBuilder) FindMany(ctx context.Context, ids []int) ([]*models.Studio, error) { + tableMgr := studioTableMgr + q := goqu.Select("*").From(tableMgr.table).Where(tableMgr.byIDInts(ids...)) + unsorted, err := qb.getMany(ctx, q) + if err != nil { + return nil, err } - return studios, nil + ret := make([]*models.Studio, len(ids)) + + for _, s := range unsorted { + i := intslice.IntIndex(ids, s.ID) + ret[i] = s + } + + for i := range ret { + if ret[i] == nil { + return nil, fmt.Errorf("studio with id %d not found", ids[i]) + } + } + + return ret, nil } -func (qb *studioQueryBuilder) FindChildren(id int) ([]*models.Studio, error) { +func (qb *studioQueryBuilder) getMany(ctx context.Context, q *goqu.SelectDataset) ([]*models.Studio, error) { + const single = false + var ret []*models.Studio + if err := queryFunc(ctx, q, single, func(r *sqlx.Rows) error { + var f models.Studio + if err := r.StructScan(&f); err != nil { + return err + } + + ret = append(ret, &f) + return nil + }); err != nil { + return nil, err + } + + return ret, nil +} + +func (qb *studioQueryBuilder) FindChildren(ctx context.Context, id int) ([]*models.Studio, error) { query := "SELECT studios.* FROM studios WHERE studios.parent_id = ?" args := []interface{}{id} - return qb.queryStudios(query, args) + return qb.queryStudios(ctx, query, args) } -func (qb *studioQueryBuilder) FindBySceneID(sceneID int) (*models.Studio, error) { +func (qb *studioQueryBuilder) FindBySceneID(ctx context.Context, sceneID int) (*models.Studio, error) { query := "SELECT studios.* FROM studios JOIN scenes ON studios.id = scenes.studio_id WHERE scenes.id = ? LIMIT 1" args := []interface{}{sceneID} - return qb.queryStudio(query, args) + return qb.queryStudio(ctx, query, args) } -func (qb *studioQueryBuilder) FindByName(name string, nocase bool) (*models.Studio, error) { +func (qb *studioQueryBuilder) FindByName(ctx context.Context, name string, nocase bool) (*models.Studio, error) { query := "SELECT * FROM studios WHERE name = ?" if nocase { query += " COLLATE NOCASE" } query += " LIMIT 1" args := []interface{}{name} - return qb.queryStudio(query, args) + return qb.queryStudio(ctx, query, args) } -func (qb *studioQueryBuilder) FindByStashID(stashID models.StashID) ([]*models.Studio, error) { +func (qb *studioQueryBuilder) FindByStashID(ctx context.Context, stashID models.StashID) ([]*models.Studio, error) { query := selectAll("studios") + ` LEFT JOIN studio_stash_ids on studio_stash_ids.studio_id = studios.id WHERE studio_stash_ids.stash_id = ? AND studio_stash_ids.endpoint = ? 
` args := []interface{}{stashID.StashID, stashID.Endpoint} - return qb.queryStudios(query, args) + return qb.queryStudios(ctx, query, args) } -func (qb *studioQueryBuilder) Count() (int, error) { - return qb.runCountQuery(qb.buildCountQuery("SELECT studios.id FROM studios"), nil) +func (qb *studioQueryBuilder) Count(ctx context.Context) (int, error) { + return qb.runCountQuery(ctx, qb.buildCountQuery("SELECT studios.id FROM studios"), nil) } -func (qb *studioQueryBuilder) All() ([]*models.Studio, error) { - return qb.queryStudios(selectAll("studios")+qb.getStudioSort(nil), nil) +func (qb *studioQueryBuilder) All(ctx context.Context) ([]*models.Studio, error) { + return qb.queryStudios(ctx, selectAll("studios")+qb.getStudioSort(nil), nil) } -func (qb *studioQueryBuilder) QueryForAutoTag(words []string) ([]*models.Studio, error) { +func (qb *studioQueryBuilder) QueryForAutoTag(ctx context.Context, words []string) ([]*models.Studio, error) { // TODO - Query needs to be changed to support queries of this type, and // this method should be removed query := selectAll(studioTable) @@ -159,7 +184,7 @@ func (qb *studioQueryBuilder) QueryForAutoTag(words []string) ([]*models.Studio, "studios.ignore_auto_tag = 0", whereOr, }, " AND ") - return qb.queryStudios(query+" WHERE "+where, args) + return qb.queryStudios(ctx, query+" WHERE "+where, args) } func (qb *studioQueryBuilder) validateFilter(filter *models.StudioFilterType) error { @@ -193,43 +218,43 @@ func (qb *studioQueryBuilder) validateFilter(filter *models.StudioFilterType) er return nil } -func (qb *studioQueryBuilder) makeFilter(studioFilter *models.StudioFilterType) *filterBuilder { +func (qb *studioQueryBuilder) makeFilter(ctx context.Context, studioFilter *models.StudioFilterType) *filterBuilder { query := &filterBuilder{} if studioFilter.And != nil { - query.and(qb.makeFilter(studioFilter.And)) + query.and(qb.makeFilter(ctx, studioFilter.And)) } if studioFilter.Or != nil { - query.or(qb.makeFilter(studioFilter.Or)) + query.or(qb.makeFilter(ctx, studioFilter.Or)) } if studioFilter.Not != nil { - query.not(qb.makeFilter(studioFilter.Not)) + query.not(qb.makeFilter(ctx, studioFilter.Not)) } - query.handleCriterion(stringCriterionHandler(studioFilter.Name, studioTable+".name")) - query.handleCriterion(stringCriterionHandler(studioFilter.Details, studioTable+".details")) - query.handleCriterion(stringCriterionHandler(studioFilter.URL, studioTable+".url")) - query.handleCriterion(intCriterionHandler(studioFilter.Rating, studioTable+".rating")) - query.handleCriterion(boolCriterionHandler(studioFilter.IgnoreAutoTag, studioTable+".ignore_auto_tag")) + query.handleCriterion(ctx, stringCriterionHandler(studioFilter.Name, studioTable+".name")) + query.handleCriterion(ctx, stringCriterionHandler(studioFilter.Details, studioTable+".details")) + query.handleCriterion(ctx, stringCriterionHandler(studioFilter.URL, studioTable+".url")) + query.handleCriterion(ctx, intCriterionHandler(studioFilter.Rating, studioTable+".rating", nil)) + query.handleCriterion(ctx, boolCriterionHandler(studioFilter.IgnoreAutoTag, studioTable+".ignore_auto_tag", nil)) - query.handleCriterion(criterionHandlerFunc(func(f *filterBuilder) { + query.handleCriterion(ctx, criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) { if studioFilter.StashID != nil { qb.stashIDRepository().join(f, "studio_stash_ids", "studios.id") - stringCriterionHandler(studioFilter.StashID, "studio_stash_ids.stash_id")(f) + stringCriterionHandler(studioFilter.StashID, 
"studio_stash_ids.stash_id")(ctx, f) } })) - query.handleCriterion(studioIsMissingCriterionHandler(qb, studioFilter.IsMissing)) - query.handleCriterion(studioSceneCountCriterionHandler(qb, studioFilter.SceneCount)) - query.handleCriterion(studioImageCountCriterionHandler(qb, studioFilter.ImageCount)) - query.handleCriterion(studioGalleryCountCriterionHandler(qb, studioFilter.GalleryCount)) - query.handleCriterion(studioParentCriterionHandler(qb, studioFilter.Parents)) - query.handleCriterion(studioAliasCriterionHandler(qb, studioFilter.Aliases)) + query.handleCriterion(ctx, studioIsMissingCriterionHandler(qb, studioFilter.IsMissing)) + query.handleCriterion(ctx, studioSceneCountCriterionHandler(qb, studioFilter.SceneCount)) + query.handleCriterion(ctx, studioImageCountCriterionHandler(qb, studioFilter.ImageCount)) + query.handleCriterion(ctx, studioGalleryCountCriterionHandler(qb, studioFilter.GalleryCount)) + query.handleCriterion(ctx, studioParentCriterionHandler(qb, studioFilter.Parents)) + query.handleCriterion(ctx, studioAliasCriterionHandler(qb, studioFilter.Aliases)) return query } -func (qb *studioQueryBuilder) Query(studioFilter *models.StudioFilterType, findFilter *models.FindFilterType) ([]*models.Studio, int, error) { +func (qb *studioQueryBuilder) Query(ctx context.Context, studioFilter *models.StudioFilterType, findFilter *models.FindFilterType) ([]*models.Studio, int, error) { if studioFilter == nil { studioFilter = &models.StudioFilterType{} } @@ -250,31 +275,26 @@ func (qb *studioQueryBuilder) Query(studioFilter *models.StudioFilterType, findF if err := qb.validateFilter(studioFilter); err != nil { return nil, 0, err } - filter := qb.makeFilter(studioFilter) + filter := qb.makeFilter(ctx, studioFilter) query.addFilter(filter) query.sortAndPagination = qb.getStudioSort(findFilter) + getPagination(findFilter) - idsResult, countResult, err := query.executeFind() + idsResult, countResult, err := query.executeFind(ctx) if err != nil { return nil, 0, err } - var studios []*models.Studio - for _, id := range idsResult { - studio, err := qb.Find(id) - if err != nil { - return nil, 0, err - } - - studios = append(studios, studio) + studios, err := qb.FindMany(ctx, idsResult) + if err != nil { + return nil, 0, err } return studios, countResult, nil } func studioIsMissingCriterionHandler(qb *studioQueryBuilder, isMissing *string) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if isMissing != nil && *isMissing != "" { switch *isMissing { case "image": @@ -291,7 +311,7 @@ func studioIsMissingCriterionHandler(qb *studioQueryBuilder, isMissing *string) } func studioSceneCountCriterionHandler(qb *studioQueryBuilder, sceneCount *models.IntCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if sceneCount != nil { f.addLeftJoin("scenes", "", "scenes.studio_id = studios.id") clause, args := getIntCriterionWhereClause("count(distinct scenes.id)", *sceneCount) @@ -302,7 +322,7 @@ func studioSceneCountCriterionHandler(qb *studioQueryBuilder, sceneCount *models } func studioImageCountCriterionHandler(qb *studioQueryBuilder, imageCount *models.IntCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if imageCount != nil { f.addLeftJoin("images", "", "images.studio_id = studios.id") clause, args := getIntCriterionWhereClause("count(distinct images.id)", *imageCount) @@ -313,7 +333,7 @@ 
func studioImageCountCriterionHandler(qb *studioQueryBuilder, imageCount *models } func studioGalleryCountCriterionHandler(qb *studioQueryBuilder, galleryCount *models.IntCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if galleryCount != nil { f.addLeftJoin("galleries", "", "galleries.studio_id = studios.id") clause, args := getIntCriterionWhereClause("count(distinct galleries.id)", *galleryCount) @@ -373,17 +393,17 @@ func (qb *studioQueryBuilder) getStudioSort(findFilter *models.FindFilterType) s } } -func (qb *studioQueryBuilder) queryStudio(query string, args []interface{}) (*models.Studio, error) { - results, err := qb.queryStudios(query, args) +func (qb *studioQueryBuilder) queryStudio(ctx context.Context, query string, args []interface{}) (*models.Studio, error) { + results, err := qb.queryStudios(ctx, query, args) if err != nil || len(results) < 1 { return nil, err } return results[0], nil } -func (qb *studioQueryBuilder) queryStudios(query string, args []interface{}) ([]*models.Studio, error) { +func (qb *studioQueryBuilder) queryStudios(ctx context.Context, query string, args []interface{}) ([]*models.Studio, error) { var ret models.Studios - if err := qb.query(query, args, &ret); err != nil { + if err := qb.query(ctx, query, args, &ret); err != nil { return nil, err } @@ -401,20 +421,20 @@ func (qb *studioQueryBuilder) imageRepository() *imageRepository { } } -func (qb *studioQueryBuilder) GetImage(studioID int) ([]byte, error) { - return qb.imageRepository().get(studioID) +func (qb *studioQueryBuilder) GetImage(ctx context.Context, studioID int) ([]byte, error) { + return qb.imageRepository().get(ctx, studioID) } -func (qb *studioQueryBuilder) HasImage(studioID int) (bool, error) { - return qb.imageRepository().exists(studioID) +func (qb *studioQueryBuilder) HasImage(ctx context.Context, studioID int) (bool, error) { + return qb.imageRepository().exists(ctx, studioID) } -func (qb *studioQueryBuilder) UpdateImage(studioID int, image []byte) error { - return qb.imageRepository().replace(studioID, image) +func (qb *studioQueryBuilder) UpdateImage(ctx context.Context, studioID int, image []byte) error { + return qb.imageRepository().replace(ctx, studioID, image) } -func (qb *studioQueryBuilder) DestroyImage(studioID int) error { - return qb.imageRepository().destroy([]int{studioID}) +func (qb *studioQueryBuilder) DestroyImage(ctx context.Context, studioID int) error { + return qb.imageRepository().destroy(ctx, []int{studioID}) } func (qb *studioQueryBuilder) stashIDRepository() *stashIDRepository { @@ -427,12 +447,12 @@ func (qb *studioQueryBuilder) stashIDRepository() *stashIDRepository { } } -func (qb *studioQueryBuilder) GetStashIDs(studioID int) ([]*models.StashID, error) { - return qb.stashIDRepository().get(studioID) +func (qb *studioQueryBuilder) GetStashIDs(ctx context.Context, studioID int) ([]models.StashID, error) { + return qb.stashIDRepository().get(ctx, studioID) } -func (qb *studioQueryBuilder) UpdateStashIDs(studioID int, stashIDs []models.StashID) error { - return qb.stashIDRepository().replace(studioID, stashIDs) +func (qb *studioQueryBuilder) UpdateStashIDs(ctx context.Context, studioID int, stashIDs []models.StashID) error { + return qb.stashIDRepository().replace(ctx, studioID, stashIDs) } func (qb *studioQueryBuilder) aliasRepository() *stringRepository { @@ -446,10 +466,10 @@ func (qb *studioQueryBuilder) aliasRepository() *stringRepository { } } -func (qb *studioQueryBuilder) 
GetAliases(studioID int) ([]string, error) { - return qb.aliasRepository().get(studioID) +func (qb *studioQueryBuilder) GetAliases(ctx context.Context, studioID int) ([]string, error) { + return qb.aliasRepository().get(ctx, studioID) } -func (qb *studioQueryBuilder) UpdateAliases(studioID int, aliases []string) error { - return qb.aliasRepository().replace(studioID, aliases) +func (qb *studioQueryBuilder) UpdateAliases(ctx context.Context, studioID int, aliases []string) error { + return qb.aliasRepository().replace(ctx, studioID, aliases) } diff --git a/pkg/sqlite/studio_test.go b/pkg/sqlite/studio_test.go index 08e6a30da..5de18fddf 100644 --- a/pkg/sqlite/studio_test.go +++ b/pkg/sqlite/studio_test.go @@ -4,6 +4,7 @@ package sqlite_test import ( + "context" "database/sql" "errors" "fmt" @@ -13,16 +14,17 @@ import ( "testing" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sqlite" "github.com/stretchr/testify/assert" ) func TestStudioFindByName(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Studio() + withTxn(func(ctx context.Context) error { + sqb := sqlite.StudioReaderWriter name := studioNames[studioIdxWithScene] // find a studio by name - studio, err := sqb.FindByName(name, false) + studio, err := sqb.FindByName(ctx, name, false) if err != nil { t.Errorf("Error finding studios: %s", err.Error()) @@ -32,7 +34,7 @@ func TestStudioFindByName(t *testing.T) { name = studioNames[studioIdxWithDupName] // find a studio by name nocase - studio, err = sqb.FindByName(name, true) + studio, err = sqb.FindByName(ctx, name, true) if err != nil { t.Errorf("Error finding studios: %s", err.Error()) @@ -67,10 +69,10 @@ func TestStudioQueryNameOr(t *testing.T) { }, } - withTxn(func(r models.Repository) error { - sqb := r.Studio() + withTxn(func(ctx context.Context) error { + sqb := sqlite.StudioReaderWriter - studios := queryStudio(t, sqb, &studioFilter, nil) + studios := queryStudio(ctx, t, sqb, &studioFilter, nil) assert.Len(t, studios, 2) assert.Equal(t, studio1Name, studios[0].Name.String) @@ -98,10 +100,10 @@ func TestStudioQueryNameAndUrl(t *testing.T) { }, } - withTxn(func(r models.Repository) error { - sqb := r.Studio() + withTxn(func(ctx context.Context) error { + sqb := sqlite.StudioReaderWriter - studios := queryStudio(t, sqb, &studioFilter, nil) + studios := queryStudio(ctx, t, sqb, &studioFilter, nil) assert.Len(t, studios, 1) assert.Equal(t, studioName, studios[0].Name.String) @@ -133,10 +135,10 @@ func TestStudioQueryNameNotUrl(t *testing.T) { }, } - withTxn(func(r models.Repository) error { - sqb := r.Studio() + withTxn(func(ctx context.Context) error { + sqb := sqlite.StudioReaderWriter - studios := queryStudio(t, sqb, &studioFilter, nil) + studios := queryStudio(ctx, t, sqb, &studioFilter, nil) for _, studio := range studios { verifyString(t, studio.Name.String, nameCriterion) @@ -164,20 +166,20 @@ func TestStudioIllegalQuery(t *testing.T) { Or: &subFilter, } - withTxn(func(r models.Repository) error { - sqb := r.Studio() + withTxn(func(ctx context.Context) error { + sqb := sqlite.StudioReaderWriter - _, _, err := sqb.Query(studioFilter, nil) + _, _, err := sqb.Query(ctx, studioFilter, nil) assert.NotNil(err) studioFilter.Or = nil studioFilter.Not = &subFilter - _, _, err = sqb.Query(studioFilter, nil) + _, _, err = sqb.Query(ctx, studioFilter, nil) assert.NotNil(err) studioFilter.And = nil studioFilter.Or = &subFilter - _, _, err = sqb.Query(studioFilter, nil) + _, _, err = sqb.Query(ctx, studioFilter, nil) assert.NotNil(err) return nil 
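The test rewrites in this file are all the same mechanical transformation: `withTxn` now hands the closure a `context.Context` instead of a `models.Repository`, and stores are reached through package-level values such as `sqlite.StudioReaderWriter`. A minimal sketch of the helper shape these tests assume (the real `withTxn` lives in the test setup, outside this diff, and opens a real transaction; the context key here is hypothetical):

```go
package main

import (
	"context"
	"fmt"
)

// txnKey is a hypothetical private context key; the real helper stores an
// open transaction handle under its own key so that store methods, which
// now all take ctx, can retrieve it (compare getTx in pkg/sqlite/table.go).
type txnKey struct{}

// withTxn sketches the new shape: the closure receives a context carrying
// the transaction rather than a repository value.
func withTxn(fn func(ctx context.Context) error) error {
	ctx := context.WithValue(context.Background(), txnKey{}, "tx-handle")
	// the real helper begins a transaction here and commits or rolls back
	// depending on the closure's returned error
	return fn(ctx)
}

func main() {
	_ = withTxn(func(ctx context.Context) error {
		fmt.Println("tx:", ctx.Value(txnKey{}))
		return nil
	})
}
```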
@@ -185,15 +187,15 @@ func TestStudioIllegalQuery(t *testing.T) { } func TestStudioQueryIgnoreAutoTag(t *testing.T) { - withTxn(func(r models.Repository) error { + withTxn(func(ctx context.Context) error { ignoreAutoTag := true studioFilter := models.StudioFilterType{ IgnoreAutoTag: &ignoreAutoTag, } - sqb := r.Studio() + sqb := sqlite.StudioReaderWriter - studios := queryStudio(t, sqb, &studioFilter, nil) + studios := queryStudio(ctx, t, sqb, &studioFilter, nil) assert.Len(t, studios, int(math.Ceil(float64(totalStudios)/5))) for _, s := range studios { @@ -205,12 +207,12 @@ func TestStudioQueryIgnoreAutoTag(t *testing.T) { } func TestStudioQueryForAutoTag(t *testing.T) { - withTxn(func(r models.Repository) error { - tqb := r.Studio() + withTxn(func(ctx context.Context) error { + tqb := sqlite.StudioReaderWriter name := studioNames[studioIdxWithMovie] // find a studio by name - studios, err := tqb.QueryForAutoTag([]string{name}) + studios, err := tqb.QueryForAutoTag(ctx, []string{name}) if err != nil { t.Errorf("Error finding studios: %s", err.Error()) @@ -221,7 +223,7 @@ func TestStudioQueryForAutoTag(t *testing.T) { // find by alias name = getStudioStringValue(studioIdxWithMovie, "Alias") - studios, err = tqb.QueryForAutoTag([]string{name}) + studios, err = tqb.QueryForAutoTag(ctx, []string{name}) if err != nil { t.Errorf("Error finding studios: %s", err.Error()) @@ -235,8 +237,8 @@ func TestStudioQueryForAutoTag(t *testing.T) { } func TestStudioQueryParent(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Studio() + withTxn(func(ctx context.Context) error { + sqb := sqlite.StudioReaderWriter studioCriterion := models.MultiCriterionInput{ Value: []string{ strconv.Itoa(studioIDs[studioIdxWithChildStudio]), @@ -248,7 +250,7 @@ func TestStudioQueryParent(t *testing.T) { Parents: &studioCriterion, } - studios, _, err := sqb.Query(&studioFilter, nil) + studios, _, err := sqb.Query(ctx, &studioFilter, nil) if err != nil { t.Errorf("Error querying studio: %s", err.Error()) } @@ -270,7 +272,7 @@ func TestStudioQueryParent(t *testing.T) { Q: &q, } - studios, _, err = sqb.Query(&studioFilter, &findFilter) + studios, _, err = sqb.Query(ctx, &studioFilter, &findFilter) if err != nil { t.Errorf("Error querying studio: %s", err.Error()) } @@ -285,28 +287,28 @@ func TestStudioDestroyParent(t *testing.T) { const childName = "child" // create parent and child studios - if err := withTxn(func(r models.Repository) error { - createdParent, err := createStudio(r.Studio(), parentName, nil) + if err := withTxn(func(ctx context.Context) error { + createdParent, err := createStudio(ctx, sqlite.StudioReaderWriter, parentName, nil) if err != nil { return fmt.Errorf("Error creating parent studio: %s", err.Error()) } parentID := int64(createdParent.ID) - createdChild, err := createStudio(r.Studio(), childName, &parentID) + createdChild, err := createStudio(ctx, sqlite.StudioReaderWriter, childName, &parentID) if err != nil { return fmt.Errorf("Error creating child studio: %s", err.Error()) } - sqb := r.Studio() + sqb := sqlite.StudioReaderWriter // destroy the parent - err = sqb.Destroy(createdParent.ID) + err = sqb.Destroy(ctx, createdParent.ID) if err != nil { return fmt.Errorf("Error destroying parent studio: %s", err.Error()) } // destroy the child - err = sqb.Destroy(createdChild.ID) + err = sqb.Destroy(ctx, createdChild.ID) if err != nil { return fmt.Errorf("Error destroying child studio: %s", err.Error()) } @@ -318,10 +320,10 @@ func TestStudioDestroyParent(t *testing.T) { } func 
TestStudioFindChildren(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Studio() + withTxn(func(ctx context.Context) error { + sqb := sqlite.StudioReaderWriter - studios, err := sqb.FindChildren(studioIDs[studioIdxWithChildStudio]) + studios, err := sqb.FindChildren(ctx, studioIDs[studioIdxWithChildStudio]) if err != nil { t.Errorf("error calling FindChildren: %s", err.Error()) @@ -330,7 +332,7 @@ func TestStudioFindChildren(t *testing.T) { assert.Len(t, studios, 1) assert.Equal(t, studioIDs[studioIdxWithParentStudio], studios[0].ID) - studios, err = sqb.FindChildren(0) + studios, err = sqb.FindChildren(ctx, 0) if err != nil { t.Errorf("error calling FindChildren: %s", err.Error()) @@ -347,19 +349,19 @@ func TestStudioUpdateClearParent(t *testing.T) { const childName = "clearParent_child" // create parent and child studios - if err := withTxn(func(r models.Repository) error { - createdParent, err := createStudio(r.Studio(), parentName, nil) + if err := withTxn(func(ctx context.Context) error { + createdParent, err := createStudio(ctx, sqlite.StudioReaderWriter, parentName, nil) if err != nil { return fmt.Errorf("Error creating parent studio: %s", err.Error()) } parentID := int64(createdParent.ID) - createdChild, err := createStudio(r.Studio(), childName, &parentID) + createdChild, err := createStudio(ctx, sqlite.StudioReaderWriter, childName, &parentID) if err != nil { return fmt.Errorf("Error creating child studio: %s", err.Error()) } - sqb := r.Studio() + sqb := sqlite.StudioReaderWriter // clear the parent id from the child updatePartial := models.StudioPartial{ @@ -367,7 +369,7 @@ func TestStudioUpdateClearParent(t *testing.T) { ParentID: &sql.NullInt64{Valid: false}, } - updatedStudio, err := sqb.Update(updatePartial) + updatedStudio, err := sqb.Update(ctx, updatePartial) if err != nil { return fmt.Errorf("Error updated studio: %s", err.Error()) @@ -384,31 +386,31 @@ func TestStudioUpdateClearParent(t *testing.T) { } func TestStudioUpdateStudioImage(t *testing.T) { - if err := withTxn(func(r models.Repository) error { - qb := r.Studio() + if err := withTxn(func(ctx context.Context) error { + qb := sqlite.StudioReaderWriter // create performer to test against const name = "TestStudioUpdateStudioImage" - created, err := createStudio(r.Studio(), name, nil) + created, err := createStudio(ctx, sqlite.StudioReaderWriter, name, nil) if err != nil { return fmt.Errorf("Error creating studio: %s", err.Error()) } image := []byte("image") - err = qb.UpdateImage(created.ID, image) + err = qb.UpdateImage(ctx, created.ID, image) if err != nil { return fmt.Errorf("Error updating studio image: %s", err.Error()) } // ensure image set - storedImage, err := qb.GetImage(created.ID) + storedImage, err := qb.GetImage(ctx, created.ID) if err != nil { return fmt.Errorf("Error getting image: %s", err.Error()) } assert.Equal(t, storedImage, image) // set nil image - err = qb.UpdateImage(created.ID, nil) + err = qb.UpdateImage(ctx, created.ID, nil) if err == nil { return fmt.Errorf("Expected error setting nil image") } @@ -420,29 +422,29 @@ func TestStudioUpdateStudioImage(t *testing.T) { } func TestStudioDestroyStudioImage(t *testing.T) { - if err := withTxn(func(r models.Repository) error { - qb := r.Studio() + if err := withTxn(func(ctx context.Context) error { + qb := sqlite.StudioReaderWriter // create performer to test against const name = "TestStudioDestroyStudioImage" - created, err := createStudio(r.Studio(), name, nil) + created, err := createStudio(ctx, sqlite.StudioReaderWriter, 
name, nil) if err != nil { return fmt.Errorf("Error creating studio: %s", err.Error()) } image := []byte("image") - err = qb.UpdateImage(created.ID, image) + err = qb.UpdateImage(ctx, created.ID, image) if err != nil { return fmt.Errorf("Error updating studio image: %s", err.Error()) } - err = qb.DestroyImage(created.ID) + err = qb.DestroyImage(ctx, created.ID) if err != nil { return fmt.Errorf("Error destroying studio image: %s", err.Error()) } // image should be nil - storedImage, err := qb.GetImage(created.ID) + storedImage, err := qb.GetImage(ctx, created.ID) if err != nil { return fmt.Errorf("Error getting image: %s", err.Error()) } @@ -474,17 +476,17 @@ func TestStudioQuerySceneCount(t *testing.T) { } func verifyStudiosSceneCount(t *testing.T, sceneCountCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - sqb := r.Studio() + withTxn(func(ctx context.Context) error { + sqb := sqlite.StudioReaderWriter studioFilter := models.StudioFilterType{ SceneCount: &sceneCountCriterion, } - studios := queryStudio(t, sqb, &studioFilter, nil) + studios := queryStudio(ctx, t, sqb, &studioFilter, nil) assert.Greater(t, len(studios), 0) for _, studio := range studios { - sceneCount, err := r.Scene().CountByStudioID(studio.ID) + sceneCount, err := db.Scene.CountByStudioID(ctx, studio.ID) if err != nil { return err } @@ -515,19 +517,19 @@ func TestStudioQueryImageCount(t *testing.T) { } func verifyStudiosImageCount(t *testing.T, imageCountCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - sqb := r.Studio() + withTxn(func(ctx context.Context) error { + sqb := sqlite.StudioReaderWriter studioFilter := models.StudioFilterType{ ImageCount: &imageCountCriterion, } - studios := queryStudio(t, sqb, &studioFilter, nil) + studios := queryStudio(ctx, t, sqb, &studioFilter, nil) assert.Greater(t, len(studios), 0) for _, studio := range studios { pp := 0 - result, err := r.Image().Query(models.ImageQueryOptions{ + result, err := db.Image.Query(ctx, models.ImageQueryOptions{ QueryOptions: models.QueryOptions{ FindFilter: &models.FindFilterType{ PerPage: &pp, @@ -571,19 +573,19 @@ func TestStudioQueryGalleryCount(t *testing.T) { } func verifyStudiosGalleryCount(t *testing.T, galleryCountCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - sqb := r.Studio() + withTxn(func(ctx context.Context) error { + sqb := sqlite.StudioReaderWriter studioFilter := models.StudioFilterType{ GalleryCount: &galleryCountCriterion, } - studios := queryStudio(t, sqb, &studioFilter, nil) + studios := queryStudio(ctx, t, sqb, &studioFilter, nil) assert.Greater(t, len(studios), 0) for _, studio := range studios { pp := 0 - _, count, err := r.Gallery().Query(&models.GalleryFilterType{ + _, count, err := db.Gallery.Query(ctx, &models.GalleryFilterType{ Studios: &models.HierarchicalMultiCriterionInput{ Value: []string{strconv.Itoa(studio.ID)}, Modifier: models.CriterionModifierIncludes, @@ -602,17 +604,17 @@ func verifyStudiosGalleryCount(t *testing.T, galleryCountCriterion models.IntCri } func TestStudioStashIDs(t *testing.T) { - if err := withTxn(func(r models.Repository) error { - qb := r.Studio() + if err := withTxn(func(ctx context.Context) error { + qb := sqlite.StudioReaderWriter // create studio to test against const name = "TestStudioStashIDs" - created, err := createStudio(r.Studio(), name, nil) + created, err := createStudio(ctx, sqlite.StudioReaderWriter, name, nil) if err != nil { return fmt.Errorf("Error creating studio: %s", 
err.Error()) } - testStashIDReaderWriter(t, qb, created.ID) + testStashIDReaderWriter(ctx, t, qb, created.ID) return nil }); err != nil { t.Error(err.Error()) @@ -632,7 +634,7 @@ func TestStudioQueryURL(t *testing.T) { URL: &urlCriterion, } - verifyFn := func(g *models.Studio, r models.Repository) { + verifyFn := func(ctx context.Context, g *models.Studio) { t.Helper() verifyNullString(t, g.URL, urlCriterion) } @@ -682,18 +684,18 @@ func TestStudioQueryRating(t *testing.T) { verifyStudiosRating(t, ratingCriterion) } -func verifyStudioQuery(t *testing.T, filter models.StudioFilterType, verifyFn func(s *models.Studio, r models.Repository)) { - withTxn(func(r models.Repository) error { +func verifyStudioQuery(t *testing.T, filter models.StudioFilterType, verifyFn func(ctx context.Context, s *models.Studio)) { + withTxn(func(ctx context.Context) error { t.Helper() - sqb := r.Studio() + sqb := sqlite.StudioReaderWriter - studios := queryStudio(t, sqb, &filter, nil) + studios := queryStudio(ctx, t, sqb, &filter, nil) // assume it should find at least one assert.Greater(t, len(studios), 0) for _, studio := range studios { - verifyFn(studio, r) + verifyFn(ctx, studio) } return nil @@ -701,13 +703,13 @@ func verifyStudioQuery(t *testing.T, filter models.StudioFilterType, verifyFn fu } func verifyStudiosRating(t *testing.T, ratingCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - sqb := r.Studio() + withTxn(func(ctx context.Context) error { + sqb := sqlite.StudioReaderWriter studioFilter := models.StudioFilterType{ Rating: &ratingCriterion, } - studios, _, err := sqb.Query(&studioFilter, nil) + studios, _, err := sqb.Query(ctx, &studioFilter, nil) if err != nil { t.Errorf("Error querying studio: %s", err.Error()) @@ -722,14 +724,14 @@ func verifyStudiosRating(t *testing.T, ratingCriterion models.IntCriterionInput) } func TestStudioQueryIsMissingRating(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Studio() + withTxn(func(ctx context.Context) error { + sqb := sqlite.StudioReaderWriter isMissing := "rating" studioFilter := models.StudioFilterType{ IsMissing: &isMissing, } - studios, _, err := sqb.Query(&studioFilter, nil) + studios, _, err := sqb.Query(ctx, &studioFilter, nil) if err != nil { t.Errorf("Error querying studio: %s", err.Error()) @@ -745,8 +747,8 @@ func TestStudioQueryIsMissingRating(t *testing.T) { }) } -func queryStudio(t *testing.T, sqb models.StudioReader, studioFilter *models.StudioFilterType, findFilter *models.FindFilterType) []*models.Studio { - studios, _, err := sqb.Query(studioFilter, findFilter) +func queryStudio(ctx context.Context, t *testing.T, sqb models.StudioReader, studioFilter *models.StudioFilterType, findFilter *models.FindFilterType) []*models.Studio { + studios, _, err := sqb.Query(ctx, studioFilter, findFilter) if err != nil { t.Errorf("Error querying studio: %s", err.Error()) } @@ -767,7 +769,7 @@ func TestStudioQueryName(t *testing.T) { Name: nameCriterion, } - verifyFn := func(studio *models.Studio, r models.Repository) { + verifyFn := func(ctx context.Context, studio *models.Studio) { verifyNullString(t, studio.Name, *nameCriterion) } @@ -797,8 +799,8 @@ func TestStudioQueryAlias(t *testing.T) { Aliases: aliasCriterion, } - verifyFn := func(studio *models.Studio, r models.Repository) { - aliases, err := r.Studio().GetAliases(studio.ID) + verifyFn := func(ctx context.Context, studio *models.Studio) { + aliases, err := sqlite.StudioReaderWriter.GetAliases(ctx, studio.ID) if err != nil { t.Errorf("Error 
querying studios: %s", err.Error()) } @@ -825,24 +827,24 @@ func TestStudioQueryAlias(t *testing.T) { } func TestStudioUpdateAlias(t *testing.T) { - if err := withTxn(func(r models.Repository) error { - qb := r.Studio() + if err := withTxn(func(ctx context.Context) error { + qb := sqlite.StudioReaderWriter // create studio to test against const name = "TestStudioUpdateAlias" - created, err := createStudio(qb, name, nil) + created, err := createStudio(ctx, qb, name, nil) if err != nil { return fmt.Errorf("Error creating studio: %s", err.Error()) } aliases := []string{"alias1", "alias2"} - err = qb.UpdateAliases(created.ID, aliases) + err = qb.UpdateAliases(ctx, created.ID, aliases) if err != nil { return fmt.Errorf("Error updating studio aliases: %s", err.Error()) } // ensure aliases set - storedAliases, err := qb.GetAliases(created.ID) + storedAliases, err := qb.GetAliases(ctx, created.ID) if err != nil { return fmt.Errorf("Error getting aliases: %s", err.Error()) } @@ -922,11 +924,11 @@ func TestStudioQueryFast(t *testing.T) { } - withTxn(func(r models.Repository) error { - sqb := r.Studio() + withTxn(func(ctx context.Context) error { + sqb := sqlite.StudioReaderWriter for _, f := range filters { for _, ff := range findFilters { - _, _, err := sqb.Query(&f, &ff) + _, _, err := sqb.Query(ctx, &f, &ff) if err != nil { t.Errorf("Error querying studio: %s", err.Error()) } diff --git a/pkg/sqlite/table.go b/pkg/sqlite/table.go new file mode 100644 index 000000000..c0b08fa6b --- /dev/null +++ b/pkg/sqlite/table.go @@ -0,0 +1,642 @@ +package sqlite + +import ( + "context" + "database/sql" + "errors" + "fmt" + + "github.com/doug-martin/goqu/v9" + "github.com/doug-martin/goqu/v9/exp" + "github.com/jmoiron/sqlx" + "gopkg.in/guregu/null.v4" + + "github.com/stashapp/stash/pkg/file" + "github.com/stashapp/stash/pkg/logger" + "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sliceutil/intslice" +) + +type table struct { + table exp.IdentifierExpression + idColumn exp.IdentifierExpression +} + +type NotFoundError struct { + ID int + Table string +} + +func (e *NotFoundError) Error() string { + return fmt.Sprintf("id %d does not exist in %s", e.ID, e.Table) +} + +func (t *table) insert(ctx context.Context, o interface{}) (sql.Result, error) { + q := dialect.Insert(t.table).Prepared(true).Rows(o) + ret, err := exec(ctx, q) + if err != nil { + return nil, fmt.Errorf("inserting into %s: %w", t.table.GetTable(), err) + } + + return ret, nil +} + +func (t *table) insertID(ctx context.Context, o interface{}) (int, error) { + result, err := t.insert(ctx, o) + if err != nil { + return 0, err + } + + ret, err := result.LastInsertId() + if err != nil { + return 0, err + } + + return int(ret), nil +} + +func (t *table) updateByID(ctx context.Context, id interface{}, o interface{}) error { + q := dialect.Update(t.table).Prepared(true).Set(o).Where(t.byID(id)) + + if _, err := exec(ctx, q); err != nil { + return fmt.Errorf("updating %s: %w", t.table.GetTable(), err) + } + + return nil +} + +func (t *table) byID(id interface{}) exp.Expression { + return t.idColumn.Eq(id) +} + +func (t *table) byIDInts(ids ...int) exp.Expression { + ii := make([]interface{}, len(ids)) + for i, id := range ids { + ii[i] = id + } + return t.idColumn.In(ii...) 
+} + +func (t *table) idExists(ctx context.Context, id interface{}) (bool, error) { + q := dialect.Select(goqu.COUNT("*")).From(t.table).Where(t.byID(id)) + + var count int + if err := querySimple(ctx, q, &count); err != nil { + return false, err + } + + return count == 1, nil +} + +func (t *table) checkIDExists(ctx context.Context, id int) error { + exists, err := t.idExists(ctx, id) + if err != nil { + return err + } + + if !exists { + return &NotFoundError{ID: id, Table: t.table.GetTable()} + } + + return nil +} + +func (t *table) destroyExisting(ctx context.Context, ids []int) error { + for _, id := range ids { + exists, err := t.idExists(ctx, id) + if err != nil { + return err + } + + if !exists { + return &NotFoundError{ + ID: id, + Table: t.table.GetTable(), + } + } + } + + return t.destroy(ctx, ids) +} + +func (t *table) destroy(ctx context.Context, ids []int) error { + q := dialect.Delete(t.table).Where(t.idColumn.In(ids)) + + if _, err := exec(ctx, q); err != nil { + return fmt.Errorf("destroying %s: %w", t.table.GetTable(), err) + } + + return nil +} + +// func (t *table) get(ctx context.Context, q *goqu.SelectDataset, dest interface{}) error { +// tx, err := getTx(ctx) +// if err != nil { +// return err +// } + +// sql, args, err := q.ToSQL() +// if err != nil { +// return fmt.Errorf("generating sql: %w", err) +// } + +// return tx.GetContext(ctx, dest, sql, args...) +// } + +type joinTable struct { + table + fkColumn exp.IdentifierExpression +} + +func (t *joinTable) invert() *joinTable { + return &joinTable{ + table: table{ + table: t.table.table, + idColumn: t.fkColumn, + }, + fkColumn: t.table.idColumn, + } +} + +func (t *joinTable) get(ctx context.Context, id int) ([]int, error) { + q := dialect.Select(t.fkColumn).From(t.table.table).Where(t.idColumn.Eq(id)) + + const single = false + var ret []int + if err := queryFunc(ctx, q, single, func(rows *sqlx.Rows) error { + var fk int + if err := rows.Scan(&fk); err != nil { + return err + } + + ret = append(ret, fk) + + return nil + }); err != nil { + return nil, fmt.Errorf("getting foreign keys from %s: %w", t.table.table.GetTable(), err) + } + + return ret, nil +} + +func (t *joinTable) insertJoin(ctx context.Context, id, foreignID int) (sql.Result, error) { + q := dialect.Insert(t.table.table).Cols(t.idColumn.GetCol(), t.fkColumn.GetCol()).Vals( + goqu.Vals{id, foreignID}, + ) + ret, err := exec(ctx, q) + if err != nil { + return nil, fmt.Errorf("inserting into %s: %w", t.table.table.GetTable(), err) + } + + return ret, nil +} + +func (t *joinTable) insertJoins(ctx context.Context, id int, foreignIDs []int) error { + for _, fk := range foreignIDs { + if _, err := t.insertJoin(ctx, id, fk); err != nil { + return err + } + } + + return nil +} + +func (t *joinTable) replaceJoins(ctx context.Context, id int, foreignIDs []int) error { + if err := t.destroy(ctx, []int{id}); err != nil { + return err + } + + return t.insertJoins(ctx, id, foreignIDs) +} + +func (t *joinTable) addJoins(ctx context.Context, id int, foreignIDs []int) error { + // get existing foreign keys + fks, err := t.get(ctx, id) + if err != nil { + return err + } + + // only add foreign keys that are not already present + foreignIDs = intslice.IntExclude(foreignIDs, fks) + return t.insertJoins(ctx, id, foreignIDs) +} + +func (t *joinTable) destroyJoins(ctx context.Context, id int, foreignIDs []int) error { + q := dialect.Delete(t.table.table).Where( + t.idColumn.Eq(id), + t.fkColumn.In(foreignIDs), + ) + + if _, err := exec(ctx, q); err != nil { + return 
fmt.Errorf("destroying %s: %w", t.table.table.GetTable(), err) + } + + return nil +} + +func (t *joinTable) modifyJoins(ctx context.Context, id int, foreignIDs []int, mode models.RelationshipUpdateMode) error { + switch mode { + case models.RelationshipUpdateModeSet: + return t.replaceJoins(ctx, id, foreignIDs) + case models.RelationshipUpdateModeAdd: + return t.addJoins(ctx, id, foreignIDs) + case models.RelationshipUpdateModeRemove: + return t.destroyJoins(ctx, id, foreignIDs) + } + + return nil +} + +type stashIDTable struct { + table +} + +type stashIDRow struct { + StashID null.String `db:"stash_id"` + Endpoint null.String `db:"endpoint"` +} + +func (r *stashIDRow) resolve() *models.StashID { + return &models.StashID{ + StashID: r.StashID.String, + Endpoint: r.Endpoint.String, + } +} + +func (t *stashIDTable) get(ctx context.Context, id int) ([]*models.StashID, error) { + q := dialect.Select("endpoint", "stash_id").From(t.table.table).Where(t.idColumn.Eq(id)) + + const single = false + var ret []*models.StashID + if err := queryFunc(ctx, q, single, func(rows *sqlx.Rows) error { + var v stashIDRow + if err := rows.StructScan(&v); err != nil { + return err + } + + ret = append(ret, v.resolve()) + + return nil + }); err != nil { + return nil, fmt.Errorf("getting stash ids from %s: %w", t.table.table.GetTable(), err) + } + + return ret, nil +} + +func (t *stashIDTable) insertJoin(ctx context.Context, id int, v models.StashID) (sql.Result, error) { + q := dialect.Insert(t.table.table).Cols(t.idColumn.GetCol(), "endpoint", "stash_id").Vals( + goqu.Vals{id, v.Endpoint, v.StashID}, + ) + ret, err := exec(ctx, q) + if err != nil { + return nil, fmt.Errorf("inserting into %s: %w", t.table.table.GetTable(), err) + } + + return ret, nil +} + +func (t *stashIDTable) insertJoins(ctx context.Context, id int, v []models.StashID) error { + for _, fk := range v { + if _, err := t.insertJoin(ctx, id, fk); err != nil { + return err + } + } + + return nil +} + +func (t *stashIDTable) replaceJoins(ctx context.Context, id int, v []models.StashID) error { + if err := t.destroy(ctx, []int{id}); err != nil { + return err + } + + return t.insertJoins(ctx, id, v) +} + +func (t *stashIDTable) addJoins(ctx context.Context, id int, v []models.StashID) error { + // get existing foreign keys + fks, err := t.get(ctx, id) + if err != nil { + return err + } + + // only add values that are not already present + var filtered []models.StashID + for _, vv := range v { + for _, e := range fks { + if vv.Endpoint == e.Endpoint { + continue + } + + filtered = append(filtered, vv) + } + } + return t.insertJoins(ctx, id, filtered) +} + +func (t *stashIDTable) destroyJoins(ctx context.Context, id int, v []models.StashID) error { + for _, vv := range v { + q := dialect.Delete(t.table.table).Where( + t.idColumn.Eq(id), + t.table.table.Col("endpoint").Eq(vv.Endpoint), + t.table.table.Col("stash_id").Eq(vv.StashID), + ) + + if _, err := exec(ctx, q); err != nil { + return fmt.Errorf("destroying %s: %w", t.table.table.GetTable(), err) + } + } + + return nil +} + +func (t *stashIDTable) modifyJoins(ctx context.Context, id int, v []models.StashID, mode models.RelationshipUpdateMode) error { + switch mode { + case models.RelationshipUpdateModeSet: + return t.replaceJoins(ctx, id, v) + case models.RelationshipUpdateModeAdd: + return t.addJoins(ctx, id, v) + case models.RelationshipUpdateModeRemove: + return t.destroyJoins(ctx, id, v) + } + + return nil +} + +type scenesMoviesTable struct { + table +} + +type moviesScenesRow struct { + 
SceneID null.Int `db:"scene_id"` + MovieID null.Int `db:"movie_id"` + SceneIndex null.Int `db:"scene_index"` +} + +func (r moviesScenesRow) resolve(sceneID int) models.MoviesScenes { + return models.MoviesScenes{ + MovieID: int(r.MovieID.Int64), + SceneIndex: nullIntPtr(r.SceneIndex), + } +} + +func (t *scenesMoviesTable) get(ctx context.Context, id int) ([]models.MoviesScenes, error) { + q := dialect.Select("movie_id", "scene_index").From(t.table.table).Where(t.idColumn.Eq(id)) + + const single = false + var ret []models.MoviesScenes + if err := queryFunc(ctx, q, single, func(rows *sqlx.Rows) error { + var v moviesScenesRow + if err := rows.StructScan(&v); err != nil { + return err + } + + ret = append(ret, v.resolve(id)) + + return nil + }); err != nil { + return nil, fmt.Errorf("getting scene movies from %s: %w", t.table.table.GetTable(), err) + } + + return ret, nil +} + +func (t *scenesMoviesTable) insertJoin(ctx context.Context, id int, v models.MoviesScenes) (sql.Result, error) { + q := dialect.Insert(t.table.table).Cols(t.idColumn.GetCol(), "movie_id", "scene_index").Vals( + goqu.Vals{id, v.MovieID, intFromPtr(v.SceneIndex)}, + ) + ret, err := exec(ctx, q) + if err != nil { + return nil, fmt.Errorf("inserting into %s: %w", t.table.table.GetTable(), err) + } + + return ret, nil +} + +func (t *scenesMoviesTable) insertJoins(ctx context.Context, id int, v []models.MoviesScenes) error { + for _, fk := range v { + if _, err := t.insertJoin(ctx, id, fk); err != nil { + return err + } + } + + return nil +} + +func (t *scenesMoviesTable) replaceJoins(ctx context.Context, id int, v []models.MoviesScenes) error { + if err := t.destroy(ctx, []int{id}); err != nil { + return err + } + + return t.insertJoins(ctx, id, v) +} + +func (t *scenesMoviesTable) addJoins(ctx context.Context, id int, v []models.MoviesScenes) error { + // get existing foreign keys + fks, err := t.get(ctx, id) + if err != nil { + return err + } + + // only add values that are not already present + var filtered []models.MoviesScenes + for _, vv := range v { + for _, e := range fks { + if vv.MovieID == e.MovieID { + continue + } + + filtered = append(filtered, vv) + } + } + return t.insertJoins(ctx, id, filtered) +} + +func (t *scenesMoviesTable) destroyJoins(ctx context.Context, id int, v []models.MoviesScenes) error { + for _, vv := range v { + q := dialect.Delete(t.table.table).Where( + t.idColumn.Eq(id), + t.table.table.Col("movie_id").Eq(vv.MovieID), + ) + + if _, err := exec(ctx, q); err != nil { + return fmt.Errorf("destroying %s: %w", t.table.table.GetTable(), err) + } + } + + return nil +} + +func (t *scenesMoviesTable) modifyJoins(ctx context.Context, id int, v []models.MoviesScenes, mode models.RelationshipUpdateMode) error { + switch mode { + case models.RelationshipUpdateModeSet: + return t.replaceJoins(ctx, id, v) + case models.RelationshipUpdateModeAdd: + return t.addJoins(ctx, id, v) + case models.RelationshipUpdateModeRemove: + return t.destroyJoins(ctx, id, v) + } + + return nil +} + +type relatedFilesTable struct { + table +} + +// type scenesFilesRow struct { +// SceneID int `db:"scene_id"` +// Primary bool `db:"primary"` +// FileID file.ID `db:"file_id"` +// } + +func (t *relatedFilesTable) insertJoin(ctx context.Context, id int, primary bool, fileID file.ID) error { + q := dialect.Insert(t.table.table).Cols(t.idColumn.GetCol(), "primary", "file_id").Vals( + goqu.Vals{id, primary, fileID}, + ) + _, err := exec(ctx, q) + if err != nil { + return fmt.Errorf("inserting into %s: %w", 
t.table.table.GetTable(), err) + } + + return nil +} + +func (t *relatedFilesTable) insertJoins(ctx context.Context, id int, firstPrimary bool, fileIDs []file.ID) error { + for i, fk := range fileIDs { + if err := t.insertJoin(ctx, id, firstPrimary && i == 0, fk); err != nil { + return err + } + } + + return nil +} + +func (t *relatedFilesTable) replaceJoins(ctx context.Context, id int, fileIDs []file.ID) error { + if err := t.destroy(ctx, []int{id}); err != nil { + return err + } + + const firstPrimary = true + return t.insertJoins(ctx, id, firstPrimary, fileIDs) +} + +func (t *relatedFilesTable) setPrimary(ctx context.Context, id int, fileID file.ID) error { + table := t.table.table + + q := dialect.Update(table).Prepared(true).Set(goqu.Record{ + "primary": 0, + }).Where(t.idColumn.Eq(id), table.Col(fileIDColumn).Neq(fileID)) + + if _, err := exec(ctx, q); err != nil { + return fmt.Errorf("unsetting primary flags in %s: %w", t.table.table.GetTable(), err) + } + + q = dialect.Update(table).Prepared(true).Set(goqu.Record{ + "primary": 1, + }).Where(t.idColumn.Eq(id), table.Col(fileIDColumn).Eq(fileID)) + + if _, err := exec(ctx, q); err != nil { + return fmt.Errorf("setting primary flag in %s: %w", t.table.table.GetTable(), err) + } + + return nil +} + +type sqler interface { + ToSQL() (sql string, params []interface{}, err error) +} + +func exec(ctx context.Context, stmt sqler) (sql.Result, error) { + tx, err := getTx(ctx) + if err != nil { + return nil, err + } + + sql, args, err := stmt.ToSQL() + if err != nil { + return nil, fmt.Errorf("generating sql: %w", err) + } + + logger.Tracef("SQL: %s [%v]", sql, args) + ret, err := tx.ExecContext(ctx, sql, args...) + if err != nil { + return nil, fmt.Errorf("executing `%s` [%v]: %w", sql, args, err) + } + + return ret, nil +} + +func count(ctx context.Context, q *goqu.SelectDataset) (int, error) { + var count int + if err := querySimple(ctx, q, &count); err != nil { + return 0, err + } + + return count, nil +} + +func queryFunc(ctx context.Context, query *goqu.SelectDataset, single bool, f func(rows *sqlx.Rows) error) error { + q, args, err := query.ToSQL() + if err != nil { + return err + } + + wrapper := dbWrapper{} + rows, err := wrapper.QueryxContext(ctx, q, args...) + + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return fmt.Errorf("querying `%s` [%v]: %w", q, args, err) + } + defer rows.Close() + + for rows.Next() { + if err := f(rows); err != nil { + return err + } + if single { + break + } + } + + if err := rows.Err(); err != nil { + return err + } + + return nil +} + +func querySimple(ctx context.Context, query *goqu.SelectDataset, out interface{}) error { + q, args, err := query.ToSQL() + if err != nil { + return err + } + + wrapper := dbWrapper{} + rows, err := wrapper.QueryxContext(ctx, q, args...) 
+ if err != nil { + return fmt.Errorf("querying `%s` [%v]: %w", q, args, err) + } + defer rows.Close() + + if rows.Next() { + if err := rows.Scan(out); err != nil { + return err + } + } + + if err := rows.Err(); err != nil { + return err + } + + return nil +} + +// func cols(table exp.IdentifierExpression, cols []string) []interface{} { +// var ret []interface{} +// for _, c := range cols { +// ret = append(ret, table.Col(c)) +// } +// return ret +// } diff --git a/pkg/sqlite/tables.go b/pkg/sqlite/tables.go new file mode 100644 index 000000000..6acc985e7 --- /dev/null +++ b/pkg/sqlite/tables.go @@ -0,0 +1,204 @@ +package sqlite + +import ( + "github.com/doug-martin/goqu/v9" + + _ "github.com/doug-martin/goqu/v9/dialect/sqlite3" +) + +var dialect = goqu.Dialect("sqlite3") + +var ( + galleriesImagesJoinTable = goqu.T(galleriesImagesTable) + imagesTagsJoinTable = goqu.T(imagesTagsTable) + performersImagesJoinTable = goqu.T(performersImagesTable) + imagesFilesJoinTable = goqu.T(imagesFilesTable) + + galleriesFilesJoinTable = goqu.T(galleriesFilesTable) + galleriesTagsJoinTable = goqu.T(galleriesTagsTable) + performersGalleriesJoinTable = goqu.T(performersGalleriesTable) + galleriesScenesJoinTable = goqu.T(galleriesScenesTable) + + scenesFilesJoinTable = goqu.T(scenesFilesTable) + scenesTagsJoinTable = goqu.T(scenesTagsTable) + scenesPerformersJoinTable = goqu.T(performersScenesTable) + scenesStashIDsJoinTable = goqu.T("scene_stash_ids") + scenesMoviesJoinTable = goqu.T(moviesScenesTable) +) + +var ( + imageTableMgr = &table{ + table: goqu.T(imageTable), + idColumn: goqu.T(imageTable).Col(idColumn), + } + + imagesFilesTableMgr = &relatedFilesTable{ + table: table{ + table: imagesFilesJoinTable, + idColumn: imagesFilesJoinTable.Col(imageIDColumn), + }, + } + + imageGalleriesTableMgr = &joinTable{ + table: table{ + table: galleriesImagesJoinTable, + idColumn: galleriesImagesJoinTable.Col(imageIDColumn), + }, + fkColumn: galleriesImagesJoinTable.Col(galleryIDColumn), + } + + imagesTagsTableMgr = &joinTable{ + table: table{ + table: imagesTagsJoinTable, + idColumn: imagesTagsJoinTable.Col(imageIDColumn), + }, + fkColumn: imagesTagsJoinTable.Col(tagIDColumn), + } + + imagesPerformersTableMgr = &joinTable{ + table: table{ + table: performersImagesJoinTable, + idColumn: performersImagesJoinTable.Col(imageIDColumn), + }, + fkColumn: performersImagesJoinTable.Col(performerIDColumn), + } +) + +var ( + galleryTableMgr = &table{ + table: goqu.T(galleryTable), + idColumn: goqu.T(galleryTable).Col(idColumn), + } + + galleriesFilesTableMgr = &relatedFilesTable{ + table: table{ + table: galleriesFilesJoinTable, + idColumn: galleriesFilesJoinTable.Col(galleryIDColumn), + }, + } + + galleriesTagsTableMgr = &joinTable{ + table: table{ + table: galleriesTagsJoinTable, + idColumn: galleriesTagsJoinTable.Col(galleryIDColumn), + }, + fkColumn: galleriesTagsJoinTable.Col(tagIDColumn), + } + + galleriesPerformersTableMgr = &joinTable{ + table: table{ + table: performersGalleriesJoinTable, + idColumn: performersGalleriesJoinTable.Col(galleryIDColumn), + }, + fkColumn: performersGalleriesJoinTable.Col(performerIDColumn), + } + + galleriesScenesTableMgr = &joinTable{ + table: table{ + table: galleriesScenesJoinTable, + idColumn: galleriesScenesJoinTable.Col(galleryIDColumn), + }, + fkColumn: galleriesScenesJoinTable.Col(sceneIDColumn), + } +) + +var ( + sceneTableMgr = &table{ + table: goqu.T(sceneTable), + idColumn: goqu.T(sceneTable).Col(idColumn), + } + + scenesFilesTableMgr = &relatedFilesTable{ + table: table{ + 
table: scenesFilesJoinTable, + idColumn: scenesFilesJoinTable.Col(sceneIDColumn), + }, + } + + scenesTagsTableMgr = &joinTable{ + table: table{ + table: scenesTagsJoinTable, + idColumn: scenesTagsJoinTable.Col(sceneIDColumn), + }, + fkColumn: scenesTagsJoinTable.Col(tagIDColumn), + } + + scenesPerformersTableMgr = &joinTable{ + table: table{ + table: scenesPerformersJoinTable, + idColumn: scenesPerformersJoinTable.Col(sceneIDColumn), + }, + fkColumn: scenesPerformersJoinTable.Col(performerIDColumn), + } + + scenesGalleriesTableMgr = galleriesScenesTableMgr.invert() + + scenesStashIDsTableMgr = &stashIDTable{ + table: table{ + table: scenesStashIDsJoinTable, + idColumn: scenesStashIDsJoinTable.Col(sceneIDColumn), + }, + } + + scenesMoviesTableMgr = &scenesMoviesTable{ + table: table{ + table: scenesMoviesJoinTable, + idColumn: scenesMoviesJoinTable.Col(sceneIDColumn), + }, + } +) + +var ( + fileTableMgr = &table{ + table: goqu.T(fileTable), + idColumn: goqu.T(fileTable).Col(idColumn), + } + + videoFileTableMgr = &table{ + table: goqu.T(videoFileTable), + idColumn: goqu.T(videoFileTable).Col(fileIDColumn), + } + + imageFileTableMgr = &table{ + table: goqu.T(imageFileTable), + idColumn: goqu.T(imageFileTable).Col(fileIDColumn), + } + + folderTableMgr = &table{ + table: goqu.T(folderTable), + idColumn: goqu.T(folderTable).Col(idColumn), + } + + fingerprintTableMgr = &table{ + table: goqu.T(fingerprintTable), + idColumn: goqu.T(fingerprintTable).Col(idColumn), + } +) + +var ( + performerTableMgr = &table{ + table: goqu.T(performerTable), + idColumn: goqu.T(performerTable).Col(idColumn), + } +) + +var ( + studioTableMgr = &table{ + table: goqu.T(studioTable), + idColumn: goqu.T(studioTable).Col(idColumn), + } +) + +var ( + tagTableMgr = &table{ + table: goqu.T(tagTable), + idColumn: goqu.T(tagTable).Col(idColumn), + } +) + +var ( + movieTableMgr = &table{ + table: goqu.T(movieTable), + idColumn: goqu.T(movieTable).Col(idColumn), + } +) diff --git a/pkg/sqlite/tag.go b/pkg/sqlite/tag.go index 9513a269b..d951f3d3b 100644 --- a/pkg/sqlite/tag.go +++ b/pkg/sqlite/tag.go @@ -1,12 +1,16 @@ package sqlite import ( + "context" "database/sql" "errors" "fmt" "strings" + "github.com/doug-martin/goqu/v9" + "github.com/jmoiron/sqlx" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sliceutil/intslice" ) const tagTable = "tags" @@ -18,53 +22,50 @@ type tagQueryBuilder struct { repository } -func NewTagReaderWriter(tx dbi) *tagQueryBuilder { - return &tagQueryBuilder{ - repository{ - tx: tx, - tableName: tagTable, - idColumn: idColumn, - }, - } +var TagReaderWriter = &tagQueryBuilder{ + repository{ + tableName: tagTable, + idColumn: idColumn, + }, } -func (qb *tagQueryBuilder) Create(newObject models.Tag) (*models.Tag, error) { +func (qb *tagQueryBuilder) Create(ctx context.Context, newObject models.Tag) (*models.Tag, error) { var ret models.Tag - if err := qb.insertObject(newObject, &ret); err != nil { + if err := qb.insertObject(ctx, newObject, &ret); err != nil { return nil, err } return &ret, nil } -func (qb *tagQueryBuilder) Update(updatedObject models.TagPartial) (*models.Tag, error) { +func (qb *tagQueryBuilder) Update(ctx context.Context, updatedObject models.TagPartial) (*models.Tag, error) { const partial = true - if err := qb.update(updatedObject.ID, updatedObject, partial); err != nil { + if err := qb.update(ctx, updatedObject.ID, updatedObject, partial); err != nil { return nil, err } - return qb.Find(updatedObject.ID) + return qb.Find(ctx, updatedObject.ID) } -func (qb 
*tagQueryBuilder) UpdateFull(updatedObject models.Tag) (*models.Tag, error) { +func (qb *tagQueryBuilder) UpdateFull(ctx context.Context, updatedObject models.Tag) (*models.Tag, error) { const partial = false - if err := qb.update(updatedObject.ID, updatedObject, partial); err != nil { + if err := qb.update(ctx, updatedObject.ID, updatedObject, partial); err != nil { return nil, err } - return qb.Find(updatedObject.ID) + return qb.Find(ctx, updatedObject.ID) } -func (qb *tagQueryBuilder) Destroy(id int) error { +func (qb *tagQueryBuilder) Destroy(ctx context.Context, id int) error { // TODO - add delete cascade to foreign key // delete tag from scenes and markers first - _, err := qb.tx.Exec("DELETE FROM scenes_tags WHERE tag_id = ?", id) + _, err := qb.tx.Exec(ctx, "DELETE FROM scenes_tags WHERE tag_id = ?", id) if err != nil { return err } // TODO - add delete cascade to foreign key - _, err = qb.tx.Exec("DELETE FROM scene_markers_tags WHERE tag_id = ?", id) + _, err = qb.tx.Exec(ctx, "DELETE FROM scene_markers_tags WHERE tag_id = ?", id) if err != nil { return err } @@ -72,7 +73,7 @@ func (qb *tagQueryBuilder) Destroy(id int) error { // cannot unset primary_tag_id in scene_markers because it is not nullable countQuery := "SELECT COUNT(*) as count FROM scene_markers where primary_tag_id = ?" args := []interface{}{id} - primaryMarkers, err := qb.runCountQuery(countQuery, args) + primaryMarkers, err := qb.runCountQuery(ctx, countQuery, args) if err != nil { return err } @@ -81,12 +82,12 @@ func (qb *tagQueryBuilder) Destroy(id int) error { return errors.New("cannot delete tag used as a primary tag in scene markers") } - return qb.destroyExisting([]int{id}) + return qb.destroyExisting(ctx, []int{id}) } -func (qb *tagQueryBuilder) Find(id int) (*models.Tag, error) { +func (qb *tagQueryBuilder) Find(ctx context.Context, id int) (*models.Tag, error) { var ret models.Tag - if err := qb.get(id, &ret); err != nil { + if err := qb.getByID(ctx, id, &ret); err != nil { if errors.Is(err, sql.ErrNoRows) { return nil, nil } @@ -95,25 +96,49 @@ func (qb *tagQueryBuilder) Find(id int) (*models.Tag, error) { return &ret, nil } -func (qb *tagQueryBuilder) FindMany(ids []int) ([]*models.Tag, error) { - var tags []*models.Tag - for _, id := range ids { - tag, err := qb.Find(id) - if err != nil { - return nil, err - } - - if tag == nil { - return nil, fmt.Errorf("tag with id %d not found", id) - } - - tags = append(tags, tag) +func (qb *tagQueryBuilder) FindMany(ctx context.Context, ids []int) ([]*models.Tag, error) { + tableMgr := tagTableMgr + q := goqu.Select("*").From(tableMgr.table).Where(tableMgr.byIDInts(ids...)) + unsorted, err := qb.getMany(ctx, q) + if err != nil { + return nil, err } - return tags, nil + ret := make([]*models.Tag, len(ids)) + + for _, s := range unsorted { + i := intslice.IntIndex(ids, s.ID) + ret[i] = s + } + + for i := range ret { + if ret[i] == nil { + return nil, fmt.Errorf("tag with id %d not found", ids[i]) + } + } + + return ret, nil } -func (qb *tagQueryBuilder) FindBySceneID(sceneID int) ([]*models.Tag, error) { +func (qb *tagQueryBuilder) getMany(ctx context.Context, q *goqu.SelectDataset) ([]*models.Tag, error) { + const single = false + var ret []*models.Tag + if err := queryFunc(ctx, q, single, func(r *sqlx.Rows) error { + var f models.Tag + if err := r.StructScan(&f); err != nil { + return err + } + + ret = append(ret, &f) + return nil + }); err != nil { + return nil, err + } + + return ret, nil +} + +func (qb *tagQueryBuilder) FindBySceneID(ctx context.Context, 
sceneID int) ([]*models.Tag, error) { query := ` SELECT tags.* FROM tags LEFT JOIN scenes_tags as scenes_join on scenes_join.tag_id = tags.id @@ -122,10 +147,10 @@ func (qb *tagQueryBuilder) FindBySceneID(sceneID int) ([]*models.Tag, error) { ` query += qb.getDefaultTagSort() args := []interface{}{sceneID} - return qb.queryTags(query, args) + return qb.queryTags(ctx, query, args) } -func (qb *tagQueryBuilder) FindByPerformerID(performerID int) ([]*models.Tag, error) { +func (qb *tagQueryBuilder) FindByPerformerID(ctx context.Context, performerID int) ([]*models.Tag, error) { query := ` SELECT tags.* FROM tags LEFT JOIN performers_tags as performers_join on performers_join.tag_id = tags.id @@ -134,10 +159,10 @@ func (qb *tagQueryBuilder) FindByPerformerID(performerID int) ([]*models.Tag, er ` query += qb.getDefaultTagSort() args := []interface{}{performerID} - return qb.queryTags(query, args) + return qb.queryTags(ctx, query, args) } -func (qb *tagQueryBuilder) FindByImageID(imageID int) ([]*models.Tag, error) { +func (qb *tagQueryBuilder) FindByImageID(ctx context.Context, imageID int) ([]*models.Tag, error) { query := ` SELECT tags.* FROM tags LEFT JOIN images_tags as images_join on images_join.tag_id = tags.id @@ -146,10 +171,10 @@ func (qb *tagQueryBuilder) FindByImageID(imageID int) ([]*models.Tag, error) { ` query += qb.getDefaultTagSort() args := []interface{}{imageID} - return qb.queryTags(query, args) + return qb.queryTags(ctx, query, args) } -func (qb *tagQueryBuilder) FindByGalleryID(galleryID int) ([]*models.Tag, error) { +func (qb *tagQueryBuilder) FindByGalleryID(ctx context.Context, galleryID int) ([]*models.Tag, error) { query := ` SELECT tags.* FROM tags LEFT JOIN galleries_tags as galleries_join on galleries_join.tag_id = tags.id @@ -158,10 +183,10 @@ func (qb *tagQueryBuilder) FindByGalleryID(galleryID int) ([]*models.Tag, error) ` query += qb.getDefaultTagSort() args := []interface{}{galleryID} - return qb.queryTags(query, args) + return qb.queryTags(ctx, query, args) } -func (qb *tagQueryBuilder) FindBySceneMarkerID(sceneMarkerID int) ([]*models.Tag, error) { +func (qb *tagQueryBuilder) FindBySceneMarkerID(ctx context.Context, sceneMarkerID int) ([]*models.Tag, error) { query := ` SELECT tags.* FROM tags LEFT JOIN scene_markers_tags as scene_markers_join on scene_markers_join.tag_id = tags.id @@ -170,20 +195,20 @@ func (qb *tagQueryBuilder) FindBySceneMarkerID(sceneMarkerID int) ([]*models.Tag ` query += qb.getDefaultTagSort() args := []interface{}{sceneMarkerID} - return qb.queryTags(query, args) + return qb.queryTags(ctx, query, args) } -func (qb *tagQueryBuilder) FindByName(name string, nocase bool) (*models.Tag, error) { +func (qb *tagQueryBuilder) FindByName(ctx context.Context, name string, nocase bool) (*models.Tag, error) { query := "SELECT * FROM tags WHERE name = ?" 
if nocase { query += " COLLATE NOCASE" } query += " LIMIT 1" args := []interface{}{name} - return qb.queryTag(query, args) + return qb.queryTag(ctx, query, args) } -func (qb *tagQueryBuilder) FindByNames(names []string, nocase bool) ([]*models.Tag, error) { +func (qb *tagQueryBuilder) FindByNames(ctx context.Context, names []string, nocase bool) ([]*models.Tag, error) { query := "SELECT * FROM tags WHERE name" if nocase { query += " COLLATE NOCASE" @@ -193,10 +218,10 @@ func (qb *tagQueryBuilder) FindByNames(names []string, nocase bool) ([]*models.T for _, name := range names { args = append(args, name) } - return qb.queryTags(query, args) + return qb.queryTags(ctx, query, args) } -func (qb *tagQueryBuilder) FindByParentTagID(parentID int) ([]*models.Tag, error) { +func (qb *tagQueryBuilder) FindByParentTagID(ctx context.Context, parentID int) ([]*models.Tag, error) { query := ` SELECT tags.* FROM tags INNER JOIN tags_relations ON tags_relations.child_id = tags.id @@ -204,10 +229,10 @@ func (qb *tagQueryBuilder) FindByParentTagID(parentID int) ([]*models.Tag, error ` query += qb.getDefaultTagSort() args := []interface{}{parentID} - return qb.queryTags(query, args) + return qb.queryTags(ctx, query, args) } -func (qb *tagQueryBuilder) FindByChildTagID(parentID int) ([]*models.Tag, error) { +func (qb *tagQueryBuilder) FindByChildTagID(ctx context.Context, parentID int) ([]*models.Tag, error) { query := ` SELECT tags.* FROM tags INNER JOIN tags_relations ON tags_relations.parent_id = tags.id @@ -215,18 +240,18 @@ func (qb *tagQueryBuilder) FindByChildTagID(parentID int) ([]*models.Tag, error) ` query += qb.getDefaultTagSort() args := []interface{}{parentID} - return qb.queryTags(query, args) + return qb.queryTags(ctx, query, args) } -func (qb *tagQueryBuilder) Count() (int, error) { - return qb.runCountQuery(qb.buildCountQuery("SELECT tags.id FROM tags"), nil) +func (qb *tagQueryBuilder) Count(ctx context.Context) (int, error) { + return qb.runCountQuery(ctx, qb.buildCountQuery("SELECT tags.id FROM tags"), nil) } -func (qb *tagQueryBuilder) All() ([]*models.Tag, error) { - return qb.queryTags(selectAll("tags")+qb.getDefaultTagSort(), nil) +func (qb *tagQueryBuilder) All(ctx context.Context) ([]*models.Tag, error) { + return qb.queryTags(ctx, selectAll("tags")+qb.getDefaultTagSort(), nil) } -func (qb *tagQueryBuilder) QueryForAutoTag(words []string) ([]*models.Tag, error) { +func (qb *tagQueryBuilder) QueryForAutoTag(ctx context.Context, words []string) ([]*models.Tag, error) { // TODO - Query needs to be changed to support queries of this type, and // this method should be removed query := selectAll(tagTable) @@ -250,7 +275,7 @@ func (qb *tagQueryBuilder) QueryForAutoTag(words []string) ([]*models.Tag, error "tags.ignore_auto_tag = 0", whereOr, }, " AND ") - return qb.queryTags(query+" WHERE "+where, args) + return qb.queryTags(ctx, query+" WHERE "+where, args) } func (qb *tagQueryBuilder) validateFilter(tagFilter *models.TagFilterType) error { @@ -284,38 +309,38 @@ func (qb *tagQueryBuilder) validateFilter(tagFilter *models.TagFilterType) error return nil } -func (qb *tagQueryBuilder) makeFilter(tagFilter *models.TagFilterType) *filterBuilder { +func (qb *tagQueryBuilder) makeFilter(ctx context.Context, tagFilter *models.TagFilterType) *filterBuilder { query := &filterBuilder{} if tagFilter.And != nil { - query.and(qb.makeFilter(tagFilter.And)) + query.and(qb.makeFilter(ctx, tagFilter.And)) } if tagFilter.Or != nil { - query.or(qb.makeFilter(tagFilter.Or)) + query.or(qb.makeFilter(ctx, 
tagFilter.Or)) } if tagFilter.Not != nil { - query.not(qb.makeFilter(tagFilter.Not)) + query.not(qb.makeFilter(ctx, tagFilter.Not)) } - query.handleCriterion(stringCriterionHandler(tagFilter.Name, tagTable+".name")) - query.handleCriterion(tagAliasCriterionHandler(qb, tagFilter.Aliases)) - query.handleCriterion(boolCriterionHandler(tagFilter.IgnoreAutoTag, tagTable+".ignore_auto_tag")) + query.handleCriterion(ctx, stringCriterionHandler(tagFilter.Name, tagTable+".name")) + query.handleCriterion(ctx, tagAliasCriterionHandler(qb, tagFilter.Aliases)) + query.handleCriterion(ctx, boolCriterionHandler(tagFilter.IgnoreAutoTag, tagTable+".ignore_auto_tag", nil)) - query.handleCriterion(tagIsMissingCriterionHandler(qb, tagFilter.IsMissing)) - query.handleCriterion(tagSceneCountCriterionHandler(qb, tagFilter.SceneCount)) - query.handleCriterion(tagImageCountCriterionHandler(qb, tagFilter.ImageCount)) - query.handleCriterion(tagGalleryCountCriterionHandler(qb, tagFilter.GalleryCount)) - query.handleCriterion(tagPerformerCountCriterionHandler(qb, tagFilter.PerformerCount)) - query.handleCriterion(tagMarkerCountCriterionHandler(qb, tagFilter.MarkerCount)) - query.handleCriterion(tagParentsCriterionHandler(qb, tagFilter.Parents)) - query.handleCriterion(tagChildrenCriterionHandler(qb, tagFilter.Children)) - query.handleCriterion(tagParentCountCriterionHandler(qb, tagFilter.ParentCount)) - query.handleCriterion(tagChildCountCriterionHandler(qb, tagFilter.ChildCount)) + query.handleCriterion(ctx, tagIsMissingCriterionHandler(qb, tagFilter.IsMissing)) + query.handleCriterion(ctx, tagSceneCountCriterionHandler(qb, tagFilter.SceneCount)) + query.handleCriterion(ctx, tagImageCountCriterionHandler(qb, tagFilter.ImageCount)) + query.handleCriterion(ctx, tagGalleryCountCriterionHandler(qb, tagFilter.GalleryCount)) + query.handleCriterion(ctx, tagPerformerCountCriterionHandler(qb, tagFilter.PerformerCount)) + query.handleCriterion(ctx, tagMarkerCountCriterionHandler(qb, tagFilter.MarkerCount)) + query.handleCriterion(ctx, tagParentsCriterionHandler(qb, tagFilter.Parents)) + query.handleCriterion(ctx, tagChildrenCriterionHandler(qb, tagFilter.Children)) + query.handleCriterion(ctx, tagParentCountCriterionHandler(qb, tagFilter.ParentCount)) + query.handleCriterion(ctx, tagChildCountCriterionHandler(qb, tagFilter.ChildCount)) return query } -func (qb *tagQueryBuilder) Query(tagFilter *models.TagFilterType, findFilter *models.FindFilterType) ([]*models.Tag, int, error) { +func (qb *tagQueryBuilder) Query(ctx context.Context, tagFilter *models.TagFilterType, findFilter *models.FindFilterType) ([]*models.Tag, int, error) { if tagFilter == nil { tagFilter = &models.TagFilterType{} } @@ -335,23 +360,19 @@ func (qb *tagQueryBuilder) Query(tagFilter *models.TagFilterType, findFilter *mo if err := qb.validateFilter(tagFilter); err != nil { return nil, 0, err } - filter := qb.makeFilter(tagFilter) + filter := qb.makeFilter(ctx, tagFilter) query.addFilter(filter) query.sortAndPagination = qb.getTagSort(&query, findFilter) + getPagination(findFilter) - idsResult, countResult, err := query.executeFind() + idsResult, countResult, err := query.executeFind(ctx) if err != nil { return nil, 0, err } - var tags []*models.Tag - for _, id := range idsResult { - tag, err := qb.Find(id) - if err != nil { - return nil, 0, err - } - tags = append(tags, tag) + tags, err := qb.FindMany(ctx, idsResult) + if err != nil { + return nil, 0, err } return tags, countResult, nil @@ -370,7 +391,7 @@ func tagAliasCriterionHandler(qb *tagQueryBuilder, 
alias *models.StringCriterion } func tagIsMissingCriterionHandler(qb *tagQueryBuilder, isMissing *string) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if isMissing != nil && *isMissing != "" { switch *isMissing { case "image": @@ -384,7 +405,7 @@ func tagIsMissingCriterionHandler(qb *tagQueryBuilder, isMissing *string) criter } func tagSceneCountCriterionHandler(qb *tagQueryBuilder, sceneCount *models.IntCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if sceneCount != nil { f.addLeftJoin("scenes_tags", "", "scenes_tags.tag_id = tags.id") clause, args := getIntCriterionWhereClause("count(distinct scenes_tags.scene_id)", *sceneCount) @@ -395,7 +416,7 @@ func tagSceneCountCriterionHandler(qb *tagQueryBuilder, sceneCount *models.IntCr } func tagImageCountCriterionHandler(qb *tagQueryBuilder, imageCount *models.IntCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if imageCount != nil { f.addLeftJoin("images_tags", "", "images_tags.tag_id = tags.id") clause, args := getIntCriterionWhereClause("count(distinct images_tags.image_id)", *imageCount) @@ -406,7 +427,7 @@ func tagImageCountCriterionHandler(qb *tagQueryBuilder, imageCount *models.IntCr } func tagGalleryCountCriterionHandler(qb *tagQueryBuilder, galleryCount *models.IntCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if galleryCount != nil { f.addLeftJoin("galleries_tags", "", "galleries_tags.tag_id = tags.id") clause, args := getIntCriterionWhereClause("count(distinct galleries_tags.gallery_id)", *galleryCount) @@ -417,7 +438,7 @@ func tagGalleryCountCriterionHandler(qb *tagQueryBuilder, galleryCount *models.I } func tagPerformerCountCriterionHandler(qb *tagQueryBuilder, performerCount *models.IntCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if performerCount != nil { f.addLeftJoin("performers_tags", "", "performers_tags.tag_id = tags.id") clause, args := getIntCriterionWhereClause("count(distinct performers_tags.performer_id)", *performerCount) @@ -428,7 +449,7 @@ func tagPerformerCountCriterionHandler(qb *tagQueryBuilder, performerCount *mode } func tagMarkerCountCriterionHandler(qb *tagQueryBuilder, markerCount *models.IntCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if markerCount != nil { f.addLeftJoin("scene_markers_tags", "", "scene_markers_tags.tag_id = tags.id") f.addLeftJoin("scene_markers", "", "scene_markers_tags.scene_marker_id = scene_markers.id OR scene_markers.primary_tag_id = tags.id") @@ -440,7 +461,7 @@ func tagMarkerCountCriterionHandler(qb *tagQueryBuilder, markerCount *models.Int } func tagParentsCriterionHandler(qb *tagQueryBuilder, tags *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if tags != nil { if tags.Modifier == models.CriterionModifierIsNull || tags.Modifier == models.CriterionModifierNotNull { var notClause string @@ -489,7 +510,7 @@ func tagParentsCriterionHandler(qb *tagQueryBuilder, tags *models.HierarchicalMu } func tagChildrenCriterionHandler(qb *tagQueryBuilder, tags *models.HierarchicalMultiCriterionInput) criterionHandlerFunc { - 
return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if tags != nil { if tags.Modifier == models.CriterionModifierIsNull || tags.Modifier == models.CriterionModifierNotNull { var notClause string @@ -538,7 +559,7 @@ func tagChildrenCriterionHandler(qb *tagQueryBuilder, tags *models.HierarchicalM } func tagParentCountCriterionHandler(qb *tagQueryBuilder, parentCount *models.IntCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if parentCount != nil { f.addLeftJoin("tags_relations", "parents_count", "parents_count.child_id = tags.id") clause, args := getIntCriterionWhereClause("count(distinct parents_count.parent_id)", *parentCount) @@ -549,7 +570,7 @@ func tagParentCountCriterionHandler(qb *tagQueryBuilder, parentCount *models.Int } func tagChildCountCriterionHandler(qb *tagQueryBuilder, childCount *models.IntCriterionInput) criterionHandlerFunc { - return func(f *filterBuilder) { + return func(ctx context.Context, f *filterBuilder) { if childCount != nil { f.addLeftJoin("tags_relations", "children_count", "children_count.parent_id = tags.id") clause, args := getIntCriterionWhereClause("count(distinct children_count.child_id)", *childCount) @@ -592,17 +613,17 @@ func (qb *tagQueryBuilder) getTagSort(query *queryBuilder, findFilter *models.Fi return getSort(sort, direction, "tags") } -func (qb *tagQueryBuilder) queryTag(query string, args []interface{}) (*models.Tag, error) { - results, err := qb.queryTags(query, args) +func (qb *tagQueryBuilder) queryTag(ctx context.Context, query string, args []interface{}) (*models.Tag, error) { + results, err := qb.queryTags(ctx, query, args) if err != nil || len(results) < 1 { return nil, err } return results[0], nil } -func (qb *tagQueryBuilder) queryTags(query string, args []interface{}) ([]*models.Tag, error) { +func (qb *tagQueryBuilder) queryTags(ctx context.Context, query string, args []interface{}) ([]*models.Tag, error) { var ret models.Tags - if err := qb.query(query, args, &ret); err != nil { + if err := qb.query(ctx, query, args, &ret); err != nil { return nil, err } @@ -620,20 +641,20 @@ func (qb *tagQueryBuilder) imageRepository() *imageRepository { } } -func (qb *tagQueryBuilder) GetImage(tagID int) ([]byte, error) { - return qb.imageRepository().get(tagID) +func (qb *tagQueryBuilder) GetImage(ctx context.Context, tagID int) ([]byte, error) { + return qb.imageRepository().get(ctx, tagID) } -func (qb *tagQueryBuilder) HasImage(tagID int) (bool, error) { - return qb.imageRepository().exists(tagID) +func (qb *tagQueryBuilder) HasImage(ctx context.Context, tagID int) (bool, error) { + return qb.imageRepository().exists(ctx, tagID) } -func (qb *tagQueryBuilder) UpdateImage(tagID int, image []byte) error { - return qb.imageRepository().replace(tagID, image) +func (qb *tagQueryBuilder) UpdateImage(ctx context.Context, tagID int, image []byte) error { + return qb.imageRepository().replace(ctx, tagID, image) } -func (qb *tagQueryBuilder) DestroyImage(tagID int) error { - return qb.imageRepository().destroy([]int{tagID}) +func (qb *tagQueryBuilder) DestroyImage(ctx context.Context, tagID int) error { + return qb.imageRepository().destroy(ctx, []int{tagID}) } func (qb *tagQueryBuilder) aliasRepository() *stringRepository { @@ -647,15 +668,15 @@ func (qb *tagQueryBuilder) aliasRepository() *stringRepository { } } -func (qb *tagQueryBuilder) GetAliases(tagID int) ([]string, error) { - return qb.aliasRepository().get(tagID) +func (qb 
*tagQueryBuilder) GetAliases(ctx context.Context, tagID int) ([]string, error) { + return qb.aliasRepository().get(ctx, tagID) } -func (qb *tagQueryBuilder) UpdateAliases(tagID int, aliases []string) error { - return qb.aliasRepository().replace(tagID, aliases) +func (qb *tagQueryBuilder) UpdateAliases(ctx context.Context, tagID int, aliases []string) error { + return qb.aliasRepository().replace(ctx, tagID, aliases) } -func (qb *tagQueryBuilder) Merge(source []int, destination int) error { +func (qb *tagQueryBuilder) Merge(ctx context.Context, source []int, destination int) error { if len(source) == 0 { return nil } @@ -680,7 +701,7 @@ func (qb *tagQueryBuilder) Merge(source []int, destination int) error { args = append(args, destination) for table, idColumn := range tagTables { - _, err := qb.tx.Exec(`UPDATE `+table+` + _, err := qb.tx.Exec(ctx, `UPDATE `+table+` SET tag_id = ? WHERE tag_id IN `+inBinding+` AND NOT EXISTS(SELECT 1 FROM `+table+` o WHERE o.`+idColumn+` = `+table+`.`+idColumn+` AND o.tag_id = ?)`, @@ -691,23 +712,23 @@ AND NOT EXISTS(SELECT 1 FROM `+table+` o WHERE o.`+idColumn+` = `+table+`.`+idCo } } - _, err := qb.tx.Exec("UPDATE "+sceneMarkerTable+" SET primary_tag_id = ? WHERE primary_tag_id IN "+inBinding, args...) + _, err := qb.tx.Exec(ctx, "UPDATE "+sceneMarkerTable+" SET primary_tag_id = ? WHERE primary_tag_id IN "+inBinding, args...) if err != nil { return err } - _, err = qb.tx.Exec("INSERT INTO "+tagAliasesTable+" (tag_id, alias) SELECT ?, name FROM "+tagTable+" WHERE id IN "+inBinding, args...) + _, err = qb.tx.Exec(ctx, "INSERT INTO "+tagAliasesTable+" (tag_id, alias) SELECT ?, name FROM "+tagTable+" WHERE id IN "+inBinding, args...) if err != nil { return err } - _, err = qb.tx.Exec("UPDATE "+tagAliasesTable+" SET tag_id = ? WHERE tag_id IN "+inBinding, args...) + _, err = qb.tx.Exec(ctx, "UPDATE "+tagAliasesTable+" SET tag_id = ? WHERE tag_id IN "+inBinding, args...) 
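Note on the Merge hunk above: aside from threading ctx through tx.Exec, the UPDATE keeps its pre-existing NOT EXISTS guard, so rows that would collide with an existing destination-tag row are left on the source tag and only disappear when the source tags are destroyed below. A self-contained sketch of that SQL shape, using a toy table and made-up IDs rather than stash's schema (error checks elided for brevity):

    // Toy demonstration of the NOT EXISTS guard in the Merge UPDATE above.
    package main

    import (
    	"database/sql"
    	"fmt"

    	_ "github.com/mattn/go-sqlite3"
    )

    func main() {
    	db, _ := sql.Open("sqlite3", ":memory:")
    	defer db.Close()

    	db.Exec(`CREATE TABLE scenes_tags (scene_id INTEGER, tag_id INTEGER, PRIMARY KEY (scene_id, tag_id))`)
    	// scene 1 carries both the source tag (2) and the destination tag (1);
    	// scene 2 carries only the source tag.
    	db.Exec(`INSERT INTO scenes_tags VALUES (1, 1), (1, 2), (2, 2)`)

    	// Reassign source rows to the destination, except where a destination
    	// row already exists -- that would violate the primary key.
    	db.Exec(`UPDATE scenes_tags SET tag_id = ? WHERE tag_id IN (?)
    AND NOT EXISTS(SELECT 1 FROM scenes_tags o WHERE o.scene_id = scenes_tags.scene_id AND o.tag_id = ?)`, 1, 2, 1)

    	var left int
    	db.QueryRow(`SELECT COUNT(*) FROM scenes_tags WHERE tag_id = 2`).Scan(&left)
    	fmt.Println(left) // 1: the (1,2) row stays until the source tag is destroyed
    }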
if err != nil { return err } for _, id := range source { - err = qb.Destroy(id) + err = qb.Destroy(ctx, id) if err != nil { return err } @@ -716,9 +737,9 @@ AND NOT EXISTS(SELECT 1 FROM `+table+` o WHERE o.`+idColumn+` = `+table+`.`+idCo return nil } -func (qb *tagQueryBuilder) UpdateParentTags(tagID int, parentIDs []int) error { +func (qb *tagQueryBuilder) UpdateParentTags(ctx context.Context, tagID int, parentIDs []int) error { tx := qb.tx - if _, err := tx.Exec("DELETE FROM tags_relations WHERE child_id = ?", tagID); err != nil { + if _, err := tx.Exec(ctx, "DELETE FROM tags_relations WHERE child_id = ?", tagID); err != nil { return err } @@ -731,7 +752,7 @@ func (qb *tagQueryBuilder) UpdateParentTags(tagID int, parentIDs []int) error { } query := "INSERT INTO tags_relations (parent_id, child_id) VALUES " + strings.Join(values, ", ") - if _, err := tx.Exec(query, args...); err != nil { + if _, err := tx.Exec(ctx, query, args...); err != nil { return err } } @@ -739,9 +760,9 @@ func (qb *tagQueryBuilder) UpdateParentTags(tagID int, parentIDs []int) error { return nil } -func (qb *tagQueryBuilder) UpdateChildTags(tagID int, childIDs []int) error { +func (qb *tagQueryBuilder) UpdateChildTags(ctx context.Context, tagID int, childIDs []int) error { tx := qb.tx - if _, err := tx.Exec("DELETE FROM tags_relations WHERE parent_id = ?", tagID); err != nil { + if _, err := tx.Exec(ctx, "DELETE FROM tags_relations WHERE parent_id = ?", tagID); err != nil { return err } @@ -754,7 +775,7 @@ func (qb *tagQueryBuilder) UpdateChildTags(tagID int, childIDs []int) error { } query := "INSERT INTO tags_relations (parent_id, child_id) VALUES " + strings.Join(values, ", ") - if _, err := tx.Exec(query, args...); err != nil { + if _, err := tx.Exec(ctx, query, args...); err != nil { return err } } @@ -764,7 +785,7 @@ func (qb *tagQueryBuilder) UpdateChildTags(tagID int, childIDs []int) error { // FindAllAncestors returns a slice of TagPath objects, representing all // ancestors of the tag with the provided id. -func (qb *tagQueryBuilder) FindAllAncestors(tagID int, excludeIDs []int) ([]*models.TagPath, error) { +func (qb *tagQueryBuilder) FindAllAncestors(ctx context.Context, tagID int, excludeIDs []int) ([]*models.TagPath, error) { inBinding := getInBinding(len(excludeIDs) + 1) query := `WITH RECURSIVE @@ -783,7 +804,7 @@ SELECT t.*, p.path FROM tags t INNER JOIN parents p ON t.id = p.parent_id } args := []interface{}{tagID} args = append(args, append(append(excludeArgs, excludeArgs...), excludeArgs...)...) - if err := qb.query(query, args, &ret); err != nil { + if err := qb.query(ctx, query, args, &ret); err != nil { return nil, err } @@ -792,7 +813,7 @@ SELECT t.*, p.path FROM tags t INNER JOIN parents p ON t.id = p.parent_id // FindAllDescendants returns a slice of TagPath objects, representing all // descendants of the tag with the provided id. -func (qb *tagQueryBuilder) FindAllDescendants(tagID int, excludeIDs []int) ([]*models.TagPath, error) { +func (qb *tagQueryBuilder) FindAllDescendants(ctx context.Context, tagID int, excludeIDs []int) ([]*models.TagPath, error) { inBinding := getInBinding(len(excludeIDs) + 1) query := `WITH RECURSIVE @@ -811,7 +832,7 @@ SELECT t.*, c.path FROM tags t INNER JOIN children c ON t.id = c.child_id } args := []interface{}{tagID} args = append(args, append(append(excludeArgs, excludeArgs...), excludeArgs...)...) 
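FindAllAncestors above (and FindAllDescendants below) walk tags_relations with a recursive CTE; the exclude-ID arguments are appended three times because the IN binding recurs in the elided parts of the query. A minimal runnable sketch of the traversal itself, assuming only the tags_relations(parent_id, child_id) shape already visible in UpdateParentTags (the real query additionally tracks a path column):

    // Recursive-CTE ancestor walk over a toy parent/child relation table.
    package main

    import (
    	"database/sql"
    	"fmt"

    	_ "github.com/mattn/go-sqlite3"
    )

    func main() {
    	db, _ := sql.Open("sqlite3", ":memory:")
    	defer db.Close()

    	db.Exec(`CREATE TABLE tags_relations (parent_id INTEGER, child_id INTEGER)`)
    	db.Exec(`INSERT INTO tags_relations VALUES (1, 2), (2, 3)`) // 1 -> 2 -> 3

    	rows, _ := db.Query(`WITH RECURSIVE parents AS (
    SELECT parent_id FROM tags_relations WHERE child_id = ?
    UNION
    SELECT r.parent_id FROM tags_relations r INNER JOIN parents p ON r.child_id = p.parent_id
    ) SELECT parent_id FROM parents`, 3)
    	defer rows.Close()

    	for rows.Next() {
    		var id int
    		rows.Scan(&id)
    		fmt.Println("ancestor:", id) // prints 2, then 1
    	}
    }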
- if err := qb.query(query, args, &ret); err != nil { + if err := qb.query(ctx, query, args, &ret); err != nil { return nil, err } diff --git a/pkg/sqlite/tag_test.go b/pkg/sqlite/tag_test.go index a5ed8d966..2f91658de 100644 --- a/pkg/sqlite/tag_test.go +++ b/pkg/sqlite/tag_test.go @@ -4,6 +4,7 @@ package sqlite_test import ( + "context" "database/sql" "fmt" "math" @@ -12,16 +13,17 @@ import ( "testing" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sqlite" "github.com/stretchr/testify/assert" ) func TestMarkerFindBySceneMarkerID(t *testing.T) { - withTxn(func(r models.Repository) error { - tqb := r.Tag() + withTxn(func(ctx context.Context) error { + tqb := sqlite.TagReaderWriter markerID := markerIDs[markerIdxWithTag] - tags, err := tqb.FindBySceneMarkerID(markerID) + tags, err := tqb.FindBySceneMarkerID(ctx, markerID) if err != nil { t.Errorf("Error finding tags: %s", err.Error()) @@ -30,7 +32,7 @@ func TestMarkerFindBySceneMarkerID(t *testing.T) { assert.Len(t, tags, 1) assert.Equal(t, tagIDs[tagIdxWithMarkers], tags[0].ID) - tags, err = tqb.FindBySceneMarkerID(0) + tags, err = tqb.FindBySceneMarkerID(ctx, 0) if err != nil { t.Errorf("Error finding tags: %s", err.Error()) @@ -43,12 +45,12 @@ func TestMarkerFindBySceneMarkerID(t *testing.T) { } func TestTagFindByName(t *testing.T) { - withTxn(func(r models.Repository) error { - tqb := r.Tag() + withTxn(func(ctx context.Context) error { + tqb := sqlite.TagReaderWriter name := tagNames[tagIdxWithScene] // find a tag by name - tag, err := tqb.FindByName(name, false) + tag, err := tqb.FindByName(ctx, name, false) if err != nil { t.Errorf("Error finding tags: %s", err.Error()) @@ -58,7 +60,7 @@ func TestTagFindByName(t *testing.T) { name = tagNames[tagIdxWithDupName] // find a tag by name nocase - tag, err = tqb.FindByName(name, true) + tag, err = tqb.FindByName(ctx, name, true) if err != nil { t.Errorf("Error finding tags: %s", err.Error()) @@ -74,15 +76,15 @@ func TestTagFindByName(t *testing.T) { } func TestTagQueryIgnoreAutoTag(t *testing.T) { - withTxn(func(r models.Repository) error { + withTxn(func(ctx context.Context) error { ignoreAutoTag := true tagFilter := models.TagFilterType{ IgnoreAutoTag: &ignoreAutoTag, } - sqb := r.Tag() + sqb := sqlite.TagReaderWriter - tags := queryTags(t, sqb, &tagFilter, nil) + tags := queryTags(ctx, t, sqb, &tagFilter, nil) assert.Len(t, tags, int(math.Ceil(float64(totalTags)/5))) for _, s := range tags { @@ -94,12 +96,12 @@ func TestTagQueryIgnoreAutoTag(t *testing.T) { } func TestTagQueryForAutoTag(t *testing.T) { - withTxn(func(r models.Repository) error { - tqb := r.Tag() + withTxn(func(ctx context.Context) error { + tqb := sqlite.TagReaderWriter name := tagNames[tagIdx1WithScene] // find a tag by name - tags, err := tqb.QueryForAutoTag([]string{name}) + tags, err := tqb.QueryForAutoTag(ctx, []string{name}) if err != nil { t.Errorf("Error finding tags: %s", err.Error()) @@ -112,7 +114,7 @@ func TestTagQueryForAutoTag(t *testing.T) { // find by alias name = getTagStringValue(tagIdx1WithScene, "Alias") - tags, err = tqb.QueryForAutoTag([]string{name}) + tags, err = tqb.QueryForAutoTag(ctx, []string{name}) if err != nil { t.Errorf("Error finding tags: %s", err.Error()) @@ -128,19 +130,19 @@ func TestTagQueryForAutoTag(t *testing.T) { func TestTagFindByNames(t *testing.T) { var names []string - withTxn(func(r models.Repository) error { - tqb := r.Tag() + withTxn(func(ctx context.Context) error { + tqb := sqlite.TagReaderWriter names = append(names, tagNames[tagIdxWithScene]) // 
find tags by names - tags, err := tqb.FindByNames(names, false) + tags, err := tqb.FindByNames(ctx, names, false) if err != nil { t.Errorf("Error finding tags: %s", err.Error()) } assert.Len(t, tags, 1) assert.Equal(t, tagNames[tagIdxWithScene], tags[0].Name) - tags, err = tqb.FindByNames(names, true) // find tags by names nocase + tags, err = tqb.FindByNames(ctx, names, true) // find tags by names nocase if err != nil { t.Errorf("Error finding tags: %s", err.Error()) } @@ -150,7 +152,7 @@ func TestTagFindByNames(t *testing.T) { names = append(names, tagNames[tagIdx1WithScene]) // find tags by names ( 2 names ) - tags, err = tqb.FindByNames(names, false) + tags, err = tqb.FindByNames(ctx, names, false) if err != nil { t.Errorf("Error finding tags: %s", err.Error()) } @@ -158,7 +160,7 @@ func TestTagFindByNames(t *testing.T) { assert.Equal(t, tagNames[tagIdxWithScene], tags[0].Name) assert.Equal(t, tagNames[tagIdx1WithScene], tags[1].Name) - tags, err = tqb.FindByNames(names, true) // find tags by names ( 2 names nocase) + tags, err = tqb.FindByNames(ctx, names, true) // find tags by names ( 2 names nocase) if err != nil { t.Errorf("Error finding tags: %s", err.Error()) } @@ -173,8 +175,8 @@ func TestTagFindByNames(t *testing.T) { } func TestTagQuerySort(t *testing.T) { - withTxn(func(r models.Repository) error { - sqb := r.Tag() + withTxn(func(ctx context.Context) error { + sqb := sqlite.TagReaderWriter sortBy := "scenes_count" dir := models.SortDirectionEnumDesc @@ -183,24 +185,24 @@ func TestTagQuerySort(t *testing.T) { Direction: &dir, } - tags := queryTags(t, sqb, nil, findFilter) + tags := queryTags(ctx, t, sqb, nil, findFilter) assert := assert.New(t) assert.Equal(tagIDs[tagIdxWithScene], tags[0].ID) sortBy = "scene_markers_count" - tags = queryTags(t, sqb, nil, findFilter) + tags = queryTags(ctx, t, sqb, nil, findFilter) assert.Equal(tagIDs[tagIdxWithMarkers], tags[0].ID) sortBy = "images_count" - tags = queryTags(t, sqb, nil, findFilter) + tags = queryTags(ctx, t, sqb, nil, findFilter) assert.Equal(tagIDs[tagIdxWithImage], tags[0].ID) sortBy = "galleries_count" - tags = queryTags(t, sqb, nil, findFilter) + tags = queryTags(ctx, t, sqb, nil, findFilter) assert.Equal(tagIDs[tagIdxWithGallery], tags[0].ID) sortBy = "performers_count" - tags = queryTags(t, sqb, nil, findFilter) + tags = queryTags(ctx, t, sqb, nil, findFilter) assert.Equal(tagIDs[tagIdxWithPerformer], tags[0].ID) return nil @@ -220,7 +222,7 @@ func TestTagQueryName(t *testing.T) { Name: nameCriterion, } - verifyFn := func(tag *models.Tag, r models.Repository) { + verifyFn := func(ctx context.Context, tag *models.Tag) { verifyString(t, tag.Name, *nameCriterion) } @@ -250,8 +252,8 @@ func TestTagQueryAlias(t *testing.T) { Aliases: aliasCriterion, } - verifyFn := func(tag *models.Tag, r models.Repository) { - aliases, err := r.Tag().GetAliases(tag.ID) + verifyFn := func(ctx context.Context, tag *models.Tag) { + aliases, err := sqlite.TagReaderWriter.GetAliases(ctx, tag.ID) if err != nil { t.Errorf("Error querying tags: %s", err.Error()) } @@ -277,23 +279,23 @@ func TestTagQueryAlias(t *testing.T) { verifyTagQuery(t, tagFilter, nil, verifyFn) } -func verifyTagQuery(t *testing.T, tagFilter *models.TagFilterType, findFilter *models.FindFilterType, verifyFn func(t *models.Tag, r models.Repository)) { - withTxn(func(r models.Repository) error { - sqb := r.Tag() +func verifyTagQuery(t *testing.T, tagFilter *models.TagFilterType, findFilter *models.FindFilterType, verifyFn func(ctx context.Context, t *models.Tag)) { + 
withTxn(func(ctx context.Context) error { + sqb := sqlite.TagReaderWriter - tags := queryTags(t, sqb, tagFilter, findFilter) + tags := queryTags(ctx, t, sqb, tagFilter, findFilter) for _, tag := range tags { - verifyFn(tag, r) + verifyFn(ctx, tag) } return nil }) } -func queryTags(t *testing.T, qb models.TagReader, tagFilter *models.TagFilterType, findFilter *models.FindFilterType) []*models.Tag { +func queryTags(ctx context.Context, t *testing.T, qb models.TagReader, tagFilter *models.TagFilterType, findFilter *models.FindFilterType) []*models.Tag { t.Helper() - tags, _, err := qb.Query(tagFilter, findFilter) + tags, _, err := qb.Query(ctx, tagFilter, findFilter) if err != nil { t.Errorf("Error querying tags: %s", err.Error()) } @@ -302,8 +304,8 @@ func queryTags(t *testing.T, qb models.TagReader, tagFilter *models.TagFilterTyp } func TestTagQueryIsMissingImage(t *testing.T) { - withTxn(func(r models.Repository) error { - qb := r.Tag() + withTxn(func(ctx context.Context) error { + qb := sqlite.TagReaderWriter isMissing := "image" tagFilter := models.TagFilterType{ IsMissing: &isMissing, @@ -314,7 +316,7 @@ func TestTagQueryIsMissingImage(t *testing.T) { Q: &q, } - tags, _, err := qb.Query(&tagFilter, &findFilter) + tags, _, err := qb.Query(ctx, &tagFilter, &findFilter) if err != nil { t.Errorf("Error querying tag: %s", err.Error()) } @@ -322,7 +324,7 @@ func TestTagQueryIsMissingImage(t *testing.T) { assert.Len(t, tags, 0) findFilter.Q = nil - tags, _, err = qb.Query(&tagFilter, &findFilter) + tags, _, err = qb.Query(ctx, &tagFilter, &findFilter) if err != nil { t.Errorf("Error querying tag: %s", err.Error()) } @@ -356,13 +358,13 @@ func TestTagQuerySceneCount(t *testing.T) { } func verifyTagSceneCount(t *testing.T, sceneCountCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - qb := r.Tag() + withTxn(func(ctx context.Context) error { + qb := sqlite.TagReaderWriter tagFilter := models.TagFilterType{ SceneCount: &sceneCountCriterion, } - tags, _, err := qb.Query(&tagFilter, nil) + tags, _, err := qb.Query(ctx, &tagFilter, nil) if err != nil { t.Errorf("Error querying tag: %s", err.Error()) } @@ -398,13 +400,13 @@ func TestTagQueryMarkerCount(t *testing.T) { } func verifyTagMarkerCount(t *testing.T, markerCountCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - qb := r.Tag() + withTxn(func(ctx context.Context) error { + qb := sqlite.TagReaderWriter tagFilter := models.TagFilterType{ MarkerCount: &markerCountCriterion, } - tags, _, err := qb.Query(&tagFilter, nil) + tags, _, err := qb.Query(ctx, &tagFilter, nil) if err != nil { t.Errorf("Error querying tag: %s", err.Error()) } @@ -440,13 +442,13 @@ func TestTagQueryImageCount(t *testing.T) { } func verifyTagImageCount(t *testing.T, imageCountCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - qb := r.Tag() + withTxn(func(ctx context.Context) error { + qb := sqlite.TagReaderWriter tagFilter := models.TagFilterType{ ImageCount: &imageCountCriterion, } - tags, _, err := qb.Query(&tagFilter, nil) + tags, _, err := qb.Query(ctx, &tagFilter, nil) if err != nil { t.Errorf("Error querying tag: %s", err.Error()) } @@ -482,13 +484,13 @@ func TestTagQueryGalleryCount(t *testing.T) { } func verifyTagGalleryCount(t *testing.T, imageCountCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - qb := r.Tag() + withTxn(func(ctx context.Context) error { + qb := sqlite.TagReaderWriter tagFilter := models.TagFilterType{ GalleryCount: 
&imageCountCriterion, } - tags, _, err := qb.Query(&tagFilter, nil) + tags, _, err := qb.Query(ctx, &tagFilter, nil) if err != nil { t.Errorf("Error querying tag: %s", err.Error()) } @@ -524,13 +526,13 @@ func TestTagQueryPerformerCount(t *testing.T) { } func verifyTagPerformerCount(t *testing.T, imageCountCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - qb := r.Tag() + withTxn(func(ctx context.Context) error { + qb := sqlite.TagReaderWriter tagFilter := models.TagFilterType{ PerformerCount: &imageCountCriterion, } - tags, _, err := qb.Query(&tagFilter, nil) + tags, _, err := qb.Query(ctx, &tagFilter, nil) if err != nil { t.Errorf("Error querying tag: %s", err.Error()) } @@ -566,13 +568,13 @@ func TestTagQueryParentCount(t *testing.T) { } func verifyTagParentCount(t *testing.T, sceneCountCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - qb := r.Tag() + withTxn(func(ctx context.Context) error { + qb := sqlite.TagReaderWriter tagFilter := models.TagFilterType{ ParentCount: &sceneCountCriterion, } - tags := queryTags(t, qb, &tagFilter, nil) + tags := queryTags(ctx, t, qb, &tagFilter, nil) if len(tags) == 0 { t.Error("Expected at least one tag") @@ -609,13 +611,13 @@ func TestTagQueryChildCount(t *testing.T) { } func verifyTagChildCount(t *testing.T, sceneCountCriterion models.IntCriterionInput) { - withTxn(func(r models.Repository) error { - qb := r.Tag() + withTxn(func(ctx context.Context) error { + qb := sqlite.TagReaderWriter tagFilter := models.TagFilterType{ ChildCount: &sceneCountCriterion, } - tags := queryTags(t, qb, &tagFilter, nil) + tags := queryTags(ctx, t, qb, &tagFilter, nil) if len(tags) == 0 { t.Error("Expected at least one tag") @@ -633,9 +635,9 @@ func verifyTagChildCount(t *testing.T, sceneCountCriterion models.IntCriterionIn } func TestTagQueryParent(t *testing.T) { - withTxn(func(r models.Repository) error { + withTxn(func(ctx context.Context) error { const nameField = "Name" - sqb := r.Tag() + sqb := sqlite.TagReaderWriter tagCriterion := models.HierarchicalMultiCriterionInput{ Value: []string{ strconv.Itoa(tagIDs[tagIdxWithChildTag]), @@ -647,7 +649,7 @@ func TestTagQueryParent(t *testing.T) { Parents: &tagCriterion, } - tags := queryTags(t, sqb, &tagFilter, nil) + tags := queryTags(ctx, t, sqb, &tagFilter, nil) assert.Len(t, tags, 1) @@ -661,7 +663,7 @@ func TestTagQueryParent(t *testing.T) { Q: &q, } - tags = queryTags(t, sqb, &tagFilter, &findFilter) + tags = queryTags(ctx, t, sqb, &tagFilter, &findFilter) assert.Len(t, tags, 0) depth := -1 @@ -674,12 +676,12 @@ func TestTagQueryParent(t *testing.T) { Depth: &depth, } - tags = queryTags(t, sqb, &tagFilter, nil) + tags = queryTags(ctx, t, sqb, &tagFilter, nil) assert.Len(t, tags, 2) depth = 1 - tags = queryTags(t, sqb, &tagFilter, nil) + tags = queryTags(ctx, t, sqb, &tagFilter, nil) assert.Len(t, tags, 2) tagCriterion = models.HierarchicalMultiCriterionInput{ @@ -687,22 +689,22 @@ func TestTagQueryParent(t *testing.T) { } q = getTagStringValue(tagIdxWithGallery, nameField) - tags = queryTags(t, sqb, &tagFilter, &findFilter) + tags = queryTags(ctx, t, sqb, &tagFilter, &findFilter) assert.Len(t, tags, 1) assert.Equal(t, tagIDs[tagIdxWithGallery], tags[0].ID) q = getTagStringValue(tagIdxWithParentTag, nameField) - tags = queryTags(t, sqb, &tagFilter, &findFilter) + tags = queryTags(ctx, t, sqb, &tagFilter, &findFilter) assert.Len(t, tags, 0) tagCriterion.Modifier = models.CriterionModifierNotNull - tags = queryTags(t, sqb, &tagFilter, &findFilter) + 
tags = queryTags(ctx, t, sqb, &tagFilter, &findFilter) assert.Len(t, tags, 1) assert.Equal(t, tagIDs[tagIdxWithParentTag], tags[0].ID) q = getTagStringValue(tagIdxWithGallery, nameField) - tags = queryTags(t, sqb, &tagFilter, &findFilter) + tags = queryTags(ctx, t, sqb, &tagFilter, &findFilter) assert.Len(t, tags, 0) return nil @@ -710,10 +712,10 @@ func TestTagQueryParent(t *testing.T) { } func TestTagQueryChild(t *testing.T) { - withTxn(func(r models.Repository) error { + withTxn(func(ctx context.Context) error { const nameField = "Name" - sqb := r.Tag() + sqb := sqlite.TagReaderWriter tagCriterion := models.HierarchicalMultiCriterionInput{ Value: []string{ strconv.Itoa(tagIDs[tagIdxWithParentTag]), @@ -725,7 +727,7 @@ func TestTagQueryChild(t *testing.T) { Children: &tagCriterion, } - tags := queryTags(t, sqb, &tagFilter, nil) + tags := queryTags(ctx, t, sqb, &tagFilter, nil) assert.Len(t, tags, 1) @@ -739,7 +741,7 @@ func TestTagQueryChild(t *testing.T) { Q: &q, } - tags = queryTags(t, sqb, &tagFilter, &findFilter) + tags = queryTags(ctx, t, sqb, &tagFilter, &findFilter) assert.Len(t, tags, 0) depth := -1 @@ -752,12 +754,12 @@ func TestTagQueryChild(t *testing.T) { Depth: &depth, } - tags = queryTags(t, sqb, &tagFilter, nil) + tags = queryTags(ctx, t, sqb, &tagFilter, nil) assert.Len(t, tags, 2) depth = 1 - tags = queryTags(t, sqb, &tagFilter, nil) + tags = queryTags(ctx, t, sqb, &tagFilter, nil) assert.Len(t, tags, 2) tagCriterion = models.HierarchicalMultiCriterionInput{ @@ -765,22 +767,22 @@ func TestTagQueryChild(t *testing.T) { } q = getTagStringValue(tagIdxWithGallery, nameField) - tags = queryTags(t, sqb, &tagFilter, &findFilter) + tags = queryTags(ctx, t, sqb, &tagFilter, &findFilter) assert.Len(t, tags, 1) assert.Equal(t, tagIDs[tagIdxWithGallery], tags[0].ID) q = getTagStringValue(tagIdxWithChildTag, nameField) - tags = queryTags(t, sqb, &tagFilter, &findFilter) + tags = queryTags(ctx, t, sqb, &tagFilter, &findFilter) assert.Len(t, tags, 0) tagCriterion.Modifier = models.CriterionModifierNotNull - tags = queryTags(t, sqb, &tagFilter, &findFilter) + tags = queryTags(ctx, t, sqb, &tagFilter, &findFilter) assert.Len(t, tags, 1) assert.Equal(t, tagIDs[tagIdxWithChildTag], tags[0].ID) q = getTagStringValue(tagIdxWithGallery, nameField) - tags = queryTags(t, sqb, &tagFilter, &findFilter) + tags = queryTags(ctx, t, sqb, &tagFilter, &findFilter) assert.Len(t, tags, 0) return nil @@ -788,34 +790,34 @@ func TestTagQueryChild(t *testing.T) { } func TestTagUpdateTagImage(t *testing.T) { - if err := withTxn(func(r models.Repository) error { - qb := r.Tag() + if err := withTxn(func(ctx context.Context) error { + qb := sqlite.TagReaderWriter // create tag to test against const name = "TestTagUpdateTagImage" tag := models.Tag{ Name: name, } - created, err := qb.Create(tag) + created, err := qb.Create(ctx, tag) if err != nil { return fmt.Errorf("Error creating tag: %s", err.Error()) } image := []byte("image") - err = qb.UpdateImage(created.ID, image) + err = qb.UpdateImage(ctx, created.ID, image) if err != nil { return fmt.Errorf("Error updating studio image: %s", err.Error()) } // ensure image set - storedImage, err := qb.GetImage(created.ID) + storedImage, err := qb.GetImage(ctx, created.ID) if err != nil { return fmt.Errorf("Error getting image: %s", err.Error()) } assert.Equal(t, storedImage, image) // set nil image - err = qb.UpdateImage(created.ID, nil) + err = qb.UpdateImage(ctx, created.ID, nil) if err == nil { return fmt.Errorf("Expected error setting nil image") } @@ -827,32 +829,32 
@@ func TestTagUpdateTagImage(t *testing.T) { } func TestTagDestroyTagImage(t *testing.T) { - if err := withTxn(func(r models.Repository) error { - qb := r.Tag() + if err := withTxn(func(ctx context.Context) error { + qb := sqlite.TagReaderWriter // create performer to test against const name = "TestTagDestroyTagImage" tag := models.Tag{ Name: name, } - created, err := qb.Create(tag) + created, err := qb.Create(ctx, tag) if err != nil { return fmt.Errorf("Error creating tag: %s", err.Error()) } image := []byte("image") - err = qb.UpdateImage(created.ID, image) + err = qb.UpdateImage(ctx, created.ID, image) if err != nil { return fmt.Errorf("Error updating studio image: %s", err.Error()) } - err = qb.DestroyImage(created.ID) + err = qb.DestroyImage(ctx, created.ID) if err != nil { return fmt.Errorf("Error destroying studio image: %s", err.Error()) } // image should be nil - storedImage, err := qb.GetImage(created.ID) + storedImage, err := qb.GetImage(ctx, created.ID) if err != nil { return fmt.Errorf("Error getting image: %s", err.Error()) } @@ -865,27 +867,27 @@ func TestTagDestroyTagImage(t *testing.T) { } func TestTagUpdateAlias(t *testing.T) { - if err := withTxn(func(r models.Repository) error { - qb := r.Tag() + if err := withTxn(func(ctx context.Context) error { + qb := sqlite.TagReaderWriter // create tag to test against const name = "TestTagUpdateAlias" tag := models.Tag{ Name: name, } - created, err := qb.Create(tag) + created, err := qb.Create(ctx, tag) if err != nil { return fmt.Errorf("Error creating tag: %s", err.Error()) } aliases := []string{"alias1", "alias2"} - err = qb.UpdateAliases(created.ID, aliases) + err = qb.UpdateAliases(ctx, created.ID, aliases) if err != nil { return fmt.Errorf("Error updating tag aliases: %s", err.Error()) } // ensure aliases set - storedAliases, err := qb.GetAliases(created.ID) + storedAliases, err := qb.GetAliases(ctx, created.ID) if err != nil { return fmt.Errorf("Error getting aliases: %s", err.Error()) } @@ -901,11 +903,11 @@ func TestTagMerge(t *testing.T) { assert := assert.New(t) // merge tests - perform these in a transaction that we'll rollback - if err := withRollbackTxn(func(r models.Repository) error { - qb := r.Tag() + if err := withRollbackTxn(func(ctx context.Context) error { + qb := sqlite.TagReaderWriter // try merging into same tag - err := qb.Merge([]int{tagIDs[tagIdx1WithScene]}, tagIDs[tagIdx1WithScene]) + err := qb.Merge(ctx, []int{tagIDs[tagIdx1WithScene]}, tagIDs[tagIdx1WithScene]) assert.NotNil(err) // merge everything into tagIdxWithScene @@ -931,13 +933,13 @@ func TestTagMerge(t *testing.T) { } destID := tagIDs[tagIdxWithScene] - if err = qb.Merge(srcIDs, destID); err != nil { + if err = qb.Merge(ctx, srcIDs, destID); err != nil { return err } // ensure other tags are deleted for _, tagId := range srcIDs { - t, err := qb.Find(tagId) + t, err := qb.Find(ctx, tagId) if err != nil { return err } @@ -946,7 +948,7 @@ func TestTagMerge(t *testing.T) { } // ensure aliases are set on the destination - destAliases, err := qb.GetAliases(destID) + destAliases, err := qb.GetAliases(ctx, destID) if err != nil { return err } @@ -955,22 +957,26 @@ func TestTagMerge(t *testing.T) { } // ensure scene points to new tag - sceneTagIDs, err := r.Scene().GetTagIDs(sceneIDs[sceneIdxWithTwoTags]) + s, err := db.Scene.Find(ctx, sceneIDs[sceneIdxWithTwoTags]) if err != nil { return err } + if err := s.LoadTagIDs(ctx, db.Scene); err != nil { + return err + } + sceneTagIDs := s.TagIDs.List() assert.Contains(sceneTagIDs, destID) // ensure marker 
points to new tag - marker, err := r.SceneMarker().Find(markerIDs[markerIdxWithTag]) + marker, err := sqlite.SceneMarkerReaderWriter.Find(ctx, markerIDs[markerIdxWithTag]) if err != nil { return err } assert.Equal(destID, marker.PrimaryTagID) - markerTagIDs, err := r.SceneMarker().GetTagIDs(marker.ID) + markerTagIDs, err := sqlite.SceneMarkerReaderWriter.GetTagIDs(ctx, marker.ID) if err != nil { return err } @@ -978,23 +984,27 @@ func TestTagMerge(t *testing.T) { assert.Contains(markerTagIDs, destID) // ensure image points to new tag - imageTagIDs, err := r.Image().GetTagIDs(imageIDs[imageIdxWithTwoTags]) + imageTagIDs, err := db.Image.GetTagIDs(ctx, imageIDs[imageIdxWithTwoTags]) if err != nil { return err } assert.Contains(imageTagIDs, destID) - // ensure gallery points to new tag - galleryTagIDs, err := r.Gallery().GetTagIDs(galleryIDs[galleryIdxWithTwoTags]) + g, err := db.Gallery.Find(ctx, galleryIDs[galleryIdxWithTwoTags]) if err != nil { return err } - assert.Contains(galleryTagIDs, destID) + if err := g.LoadTagIDs(ctx, db.Gallery); err != nil { + return err + } + + // ensure gallery points to new tag + assert.Contains(g.TagIDs.List(), destID) // ensure performer points to new tag - performerTagIDs, err := r.Gallery().GetTagIDs(performerIDs[performerIdxWithTwoTags]) + performerTagIDs, err := sqlite.PerformerReaderWriter.GetTagIDs(ctx, performerIDs[performerIdxWithTwoTags]) if err != nil { return err } diff --git a/pkg/sqlite/transaction.go b/pkg/sqlite/transaction.go index 50486d01e..0e3234c5c 100644 --- a/pkg/sqlite/transaction.go +++ b/pkg/sqlite/transaction.go @@ -2,209 +2,128 @@ package sqlite import ( "context" - "database/sql" "errors" "fmt" + "runtime/debug" "github.com/jmoiron/sqlx" - "github.com/stashapp/stash/pkg/database" + "github.com/mattn/go-sqlite3" + "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" ) -type dbi interface { - Get(dest interface{}, query string, args ...interface{}) error - Select(dest interface{}, query string, args ...interface{}) error - Queryx(query string, args ...interface{}) (*sqlx.Rows, error) - NamedExec(query string, arg interface{}) (sql.Result, error) - Exec(query string, args ...interface{}) (sql.Result, error) -} +type key int -type transaction struct { - Ctx context.Context - tx *sqlx.Tx -} +const ( + txnKey key = iota + 1 + dbKey + hookManagerKey +) -func (t *transaction) Begin() error { - if t.tx != nil { - return errors.New("transaction already begun") +func (db *Database) WithDatabase(ctx context.Context) (context.Context, error) { + // if we are already in a transaction or have a database already, just use it + if tx, _ := getDBReader(ctx); tx != nil { + return ctx, nil } - if err := database.Ready(); err != nil { + return context.WithValue(ctx, dbKey, db.db), nil +} + +func (db *Database) Begin(ctx context.Context) (context.Context, error) { + if tx, _ := getTx(ctx); tx != nil { + // log the stack trace so we can see + logger.Error(string(debug.Stack())) + + return nil, fmt.Errorf("already in transaction") + } + + tx, err := db.db.BeginTxx(ctx, nil) + if err != nil { + return nil, fmt.Errorf("beginning transaction: %w", err) + } + + hookMgr := &hookManager{} + ctx = hookMgr.register(ctx) + + return context.WithValue(ctx, txnKey, tx), nil +} + +func (db *Database) Commit(ctx context.Context) error { + tx, err := getTx(ctx) + if err != nil { return err } - var err error - t.tx, err = database.DB.BeginTxx(t.Ctx, nil) - if err != nil { - return fmt.Errorf("error starting transaction: %v", err) - } - - return 
nil -} - -func (t *transaction) Rollback() error { - if t.tx == nil { - return errors.New("not in transaction") - } - - err := t.tx.Rollback() - if err != nil { - return fmt.Errorf("error rolling back transaction: %v", err) - } - t.tx = nil - - return nil -} - -func (t *transaction) Commit() error { - if t.tx == nil { - return errors.New("not in transaction") - } - - err := t.tx.Commit() - if err != nil { - return fmt.Errorf("error committing transaction: %v", err) - } - t.tx = nil - - return nil -} - -func (t *transaction) Repository() models.Repository { - return t -} - -func (t *transaction) ensureTx() { - if t.tx == nil { - panic("tx is nil") - } -} - -func (t *transaction) Gallery() models.GalleryReaderWriter { - t.ensureTx() - return NewGalleryReaderWriter(t.tx) -} - -func (t *transaction) Image() models.ImageReaderWriter { - t.ensureTx() - return NewImageReaderWriter(t.tx) -} - -func (t *transaction) Movie() models.MovieReaderWriter { - t.ensureTx() - return NewMovieReaderWriter(t.tx) -} - -func (t *transaction) Performer() models.PerformerReaderWriter { - t.ensureTx() - return NewPerformerReaderWriter(t.tx) -} - -func (t *transaction) SceneMarker() models.SceneMarkerReaderWriter { - t.ensureTx() - return NewSceneMarkerReaderWriter(t.tx) -} - -func (t *transaction) Scene() models.SceneReaderWriter { - t.ensureTx() - return NewSceneReaderWriter(t.tx) -} - -func (t *transaction) ScrapedItem() models.ScrapedItemReaderWriter { - t.ensureTx() - return NewScrapedItemReaderWriter(t.tx) -} - -func (t *transaction) Studio() models.StudioReaderWriter { - t.ensureTx() - return NewStudioReaderWriter(t.tx) -} - -func (t *transaction) Tag() models.TagReaderWriter { - t.ensureTx() - return NewTagReaderWriter(t.tx) -} - -func (t *transaction) SavedFilter() models.SavedFilterReaderWriter { - t.ensureTx() - return NewSavedFilterReaderWriter(t.tx) -} - -type ReadTransaction struct{} - -func (t *ReadTransaction) Begin() error { - if err := database.Ready(); err != nil { + if err := tx.Commit(); err != nil { return err } + // execute post-commit hooks + db.executePostCommitHooks(ctx) + return nil } -func (t *ReadTransaction) Rollback() error { +func (db *Database) Rollback(ctx context.Context) error { + tx, err := getTx(ctx) + if err != nil { + return err + } + + if err := tx.Rollback(); err != nil { + return err + } + + // execute post-rollback hooks + db.executePostRollbackHooks(ctx) + return nil } -func (t *ReadTransaction) Commit() error { - return nil +func getTx(ctx context.Context) (*sqlx.Tx, error) { + tx, ok := ctx.Value(txnKey).(*sqlx.Tx) + if !ok || tx == nil { + return nil, fmt.Errorf("not in transaction") + } + return tx, nil } -func (t *ReadTransaction) Repository() models.ReaderRepository { - return t +func getDBReader(ctx context.Context) (dbReader, error) { + // get transaction first if present + tx, ok := ctx.Value(txnKey).(*sqlx.Tx) + if !ok || tx == nil { + // try to get database if present + db, ok := ctx.Value(dbKey).(*sqlx.DB) + if !ok || db == nil { + return nil, fmt.Errorf("not in transaction") + } + return db, nil + } + return tx, nil } -func (t *ReadTransaction) Gallery() models.GalleryReader { - return NewGalleryReaderWriter(database.DB) +func (db *Database) IsLocked(err error) bool { + var sqliteError sqlite3.Error + if errors.As(err, &sqliteError) { + return sqliteError.Code == sqlite3.ErrBusy + } + return false } -func (t *ReadTransaction) Image() models.ImageReader { - return NewImageReaderWriter(database.DB) -} - -func (t *ReadTransaction) Movie() models.MovieReader { - 
return NewMovieReaderWriter(database.DB) -} - -func (t *ReadTransaction) Performer() models.PerformerReader { - return NewPerformerReaderWriter(database.DB) -} - -func (t *ReadTransaction) SceneMarker() models.SceneMarkerReader { - return NewSceneMarkerReaderWriter(database.DB) -} - -func (t *ReadTransaction) Scene() models.SceneReader { - return NewSceneReaderWriter(database.DB) -} - -func (t *ReadTransaction) ScrapedItem() models.ScrapedItemReader { - return NewScrapedItemReaderWriter(database.DB) -} - -func (t *ReadTransaction) Studio() models.StudioReader { - return NewStudioReaderWriter(database.DB) -} - -func (t *ReadTransaction) Tag() models.TagReader { - return NewTagReaderWriter(database.DB) -} - -func (t *ReadTransaction) SavedFilter() models.SavedFilterReader { - return NewSavedFilterReaderWriter(database.DB) -} - -type TransactionManager struct { -} - -func NewTransactionManager() *TransactionManager { - return &TransactionManager{} -} - -func (t *TransactionManager) WithTxn(ctx context.Context, fn func(r models.Repository) error) error { - database.WriteMu.Lock() - defer database.WriteMu.Unlock() - return models.WithTxn(&transaction{Ctx: ctx}, fn) -} - -func (t *TransactionManager) WithReadTxn(ctx context.Context, fn func(r models.ReaderRepository) error) error { - return models.WithROTxn(&ReadTransaction{}, fn) +func (db *Database) TxnRepository() models.Repository { + return models.Repository{ + TxnManager: db, + File: db.File, + Folder: db.Folder, + Gallery: db.Gallery, + Image: db.Image, + Movie: MovieReaderWriter, + Performer: PerformerReaderWriter, + Scene: db.Scene, + SceneMarker: SceneMarkerReaderWriter, + ScrapedItem: ScrapedItemReaderWriter, + Studio: StudioReaderWriter, + Tag: TagReaderWriter, + SavedFilter: SavedFilterReaderWriter, + } } diff --git a/pkg/sqlite/tx.go b/pkg/sqlite/tx.go new file mode 100644 index 000000000..345852c76 --- /dev/null +++ b/pkg/sqlite/tx.go @@ -0,0 +1,119 @@ +package sqlite + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/jmoiron/sqlx" + "github.com/stashapp/stash/pkg/logger" +) + +const ( + slowLogTime = time.Millisecond * 200 +) + +type dbReader interface { + Get(dest interface{}, query string, args ...interface{}) error + Select(dest interface{}, query string, args ...interface{}) error + Queryx(query string, args ...interface{}) (*sqlx.Rows, error) + QueryxContext(ctx context.Context, query string, args ...interface{}) (*sqlx.Rows, error) +} + +func logSQL(start time.Time, query string, args ...interface{}) { + since := time.Since(start) + if since >= slowLogTime { + logger.Debugf("SLOW SQL [%v]: %s, args: %v", since, query, args) + } else { + logger.Tracef("SQL [%v]: %s, args: %v", since, query, args) + } +} + +type dbWrapper struct{} + +func sqlError(err error, sql string, args ...interface{}) error { + if err == nil { + return nil + } + + return fmt.Errorf("error executing `%s` [%v]: %w", sql, args, err) +} + +func (*dbWrapper) Get(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + tx, err := getDBReader(ctx) + if err != nil { + return sqlError(err, query, args...) + } + + start := time.Now() + err = tx.Get(dest, query, args...) + logSQL(start, query, args...) + + return sqlError(err, query, args...) +} + +func (*dbWrapper) Select(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + tx, err := getDBReader(ctx) + if err != nil { + return sqlError(err, query, args...) + } + + start := time.Now() + err = tx.Select(dest, query, args...) 
+ logSQL(start, query, args...) + + return sqlError(err, query, args...) +} + +func (*dbWrapper) Queryx(ctx context.Context, query string, args ...interface{}) (*sqlx.Rows, error) { + tx, err := getDBReader(ctx) + if err != nil { + return nil, sqlError(err, query, args...) + } + + start := time.Now() + ret, err := tx.Queryx(query, args...) + logSQL(start, query, args...) + + return ret, sqlError(err, query, args...) +} + +func (*dbWrapper) QueryxContext(ctx context.Context, query string, args ...interface{}) (*sqlx.Rows, error) { + tx, err := getDBReader(ctx) + if err != nil { + return nil, sqlError(err, query, args...) + } + + start := time.Now() + ret, err := tx.QueryxContext(ctx, query, args...) + logSQL(start, query, args...) + + return ret, sqlError(err, query, args...) +} + +func (*dbWrapper) NamedExec(ctx context.Context, query string, arg interface{}) (sql.Result, error) { + tx, err := getTx(ctx) + if err != nil { + return nil, sqlError(err, query, arg) + } + + start := time.Now() + ret, err := tx.NamedExec(query, arg) + logSQL(start, query, arg) + + return ret, sqlError(err, query, arg) +} + +func (*dbWrapper) Exec(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + tx, err := getTx(ctx) + if err != nil { + return nil, sqlError(err, query, args...) + } + + start := time.Now() + ret, err := tx.Exec(query, args...) + logSQL(start, query, args...) + + return ret, sqlError(err, query, args...) +} diff --git a/pkg/sqlite/values.go b/pkg/sqlite/values.go new file mode 100644 index 000000000..eafb8e462 --- /dev/null +++ b/pkg/sqlite/values.go @@ -0,0 +1,61 @@ +package sqlite + +import ( + "github.com/stashapp/stash/pkg/file" + + "gopkg.in/guregu/null.v4" +) + +// null package does not provide methods to convert null.Int to int pointer +func intFromPtr(i *int) null.Int { + if i == nil { + return null.NewInt(0, false) + } + + return null.IntFrom(int64(*i)) +} + +func nullIntPtr(i null.Int) *int { + if !i.Valid { + return nil + } + + v := int(i.Int64) + return &v +} + +func nullIntFolderIDPtr(i null.Int) *file.FolderID { + if !i.Valid { + return nil + } + + v := file.FolderID(i.Int64) + + return &v +} + +func nullIntFileIDPtr(i null.Int) *file.ID { + if !i.Valid { + return nil + } + + v := file.ID(i.Int64) + + return &v +} + +func nullIntFromFileIDPtr(i *file.ID) null.Int { + if i == nil { + return null.NewInt(0, false) + } + + return null.IntFrom(int64(*i)) +} + +func nullIntFromFolderIDPtr(i *file.FolderID) null.Int { + if i == nil { + return null.NewInt(0, false) + } + + return null.IntFrom(int64(*i)) +} diff --git a/pkg/studio/export.go b/pkg/studio/export.go index 951a60417..27cbaeb38 100644 --- a/pkg/studio/export.go +++ b/pkg/studio/export.go @@ -1,6 +1,7 @@ package studio import ( + "context" "fmt" "github.com/stashapp/stash/pkg/models" @@ -9,8 +10,15 @@ import ( "github.com/stashapp/stash/pkg/utils" ) +type FinderImageStashIDGetter interface { + Finder + GetAliases(ctx context.Context, studioID int) ([]string, error) + GetImage(ctx context.Context, studioID int) ([]byte, error) + models.StashIDLoader +} + // ToJSON converts a Studio object into its JSON equivalent. 
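FinderImageStashIDGetter above follows the consumer-defined interface pattern this PR applies throughout: instead of depending on the broad models.StudioReader, ToJSON (below) declares exactly the methods it calls, so tests can satisfy the dependency with a small fake. A hypothetical stand-alone illustration (AliasGetter and fakeStore are made-up names, not stash types):

    // Sketch of a consumer-defined interface: the caller declares only what it uses.
    package main

    import (
    	"context"
    	"fmt"
    )

    type AliasGetter interface {
    	GetAliases(ctx context.Context, id int) ([]string, error)
    }

    type fakeStore struct{}

    func (fakeStore) GetAliases(ctx context.Context, id int) ([]string, error) {
    	return []string{"alias"}, nil
    }

    // exportAliases needs only the narrow dependency, not a full reader/writer.
    func exportAliases(ctx context.Context, r AliasGetter, id int) ([]string, error) {
    	return r.GetAliases(ctx, id)
    }

    func main() {
    	out, _ := exportAliases(context.Background(), fakeStore{}, 1)
    	fmt.Println(out)
    }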
-func ToJSON(reader models.StudioReader, studio *models.Studio) (*jsonschema.Studio, error) { +func ToJSON(ctx context.Context, reader FinderImageStashIDGetter, studio *models.Studio) (*jsonschema.Studio, error) { newStudioJSON := jsonschema.Studio{ IgnoreAutoTag: studio.IgnoreAutoTag, CreatedAt: json.JSONTime{Time: studio.CreatedAt.Timestamp}, @@ -30,7 +38,7 @@ func ToJSON(reader models.StudioReader, studio *models.Studio) (*jsonschema.Stud } if studio.ParentID.Valid { - parent, err := reader.Find(int(studio.ParentID.Int64)) + parent, err := reader.Find(ctx, int(studio.ParentID.Int64)) if err != nil { return nil, fmt.Errorf("error getting parent studio: %v", err) } @@ -44,14 +52,14 @@ func ToJSON(reader models.StudioReader, studio *models.Studio) (*jsonschema.Stud newStudioJSON.Rating = int(studio.Rating.Int64) } - aliases, err := reader.GetAliases(studio.ID) + aliases, err := reader.GetAliases(ctx, studio.ID) if err != nil { return nil, fmt.Errorf("error getting studio aliases: %v", err) } newStudioJSON.Aliases = aliases - image, err := reader.GetImage(studio.ID) + image, err := reader.GetImage(ctx, studio.ID) if err != nil { return nil, fmt.Errorf("error getting studio image: %v", err) } @@ -60,7 +68,7 @@ func ToJSON(reader models.StudioReader, studio *models.Studio) (*jsonschema.Stud newStudioJSON.Image = utils.GetBase64StringFromData(image) } - stashIDs, _ := reader.GetStashIDs(studio.ID) + stashIDs, _ := reader.GetStashIDs(ctx, studio.ID) var ret []models.StashID for _, stashID := range stashIDs { newJoin := models.StashID{ diff --git a/pkg/studio/export_test.go b/pkg/studio/export_test.go index a1f261254..8b329668e 100644 --- a/pkg/studio/export_test.go +++ b/pkg/studio/export_test.go @@ -1,6 +1,7 @@ package studio import ( + "context" "errors" "github.com/stashapp/stash/pkg/models" @@ -45,8 +46,8 @@ var stashID = models.StashID{ StashID: "StashID", Endpoint: "Endpoint", } -var stashIDs = []*models.StashID{ - &stashID, +var stashIDs = []models.StashID{ + stashID, } const image = "aW1hZ2VCeXRlcw==" @@ -169,39 +170,40 @@ func initTestTable() { func TestToJSON(t *testing.T) { initTestTable() + ctx := context.Background() mockStudioReader := &mocks.StudioReaderWriter{} imageErr := errors.New("error getting image") - mockStudioReader.On("GetImage", studioID).Return(imageBytes, nil).Once() - mockStudioReader.On("GetImage", noImageID).Return(nil, nil).Once() - mockStudioReader.On("GetImage", errImageID).Return(nil, imageErr).Once() - mockStudioReader.On("GetImage", missingParentStudioID).Return(imageBytes, nil).Maybe() - mockStudioReader.On("GetImage", errStudioID).Return(imageBytes, nil).Maybe() - mockStudioReader.On("GetImage", errAliasID).Return(imageBytes, nil).Maybe() + mockStudioReader.On("GetImage", ctx, studioID).Return(imageBytes, nil).Once() + mockStudioReader.On("GetImage", ctx, noImageID).Return(nil, nil).Once() + mockStudioReader.On("GetImage", ctx, errImageID).Return(nil, imageErr).Once() + mockStudioReader.On("GetImage", ctx, missingParentStudioID).Return(imageBytes, nil).Maybe() + mockStudioReader.On("GetImage", ctx, errStudioID).Return(imageBytes, nil).Maybe() + mockStudioReader.On("GetImage", ctx, errAliasID).Return(imageBytes, nil).Maybe() parentStudioErr := errors.New("error getting parent studio") - mockStudioReader.On("Find", parentStudioID).Return(&parentStudio, nil) - mockStudioReader.On("Find", missingStudioID).Return(nil, nil) - mockStudioReader.On("Find", errParentStudioID).Return(nil, parentStudioErr) + mockStudioReader.On("Find", ctx, 
parentStudioID).Return(&parentStudio, nil) + mockStudioReader.On("Find", ctx, missingStudioID).Return(nil, nil) + mockStudioReader.On("Find", ctx, errParentStudioID).Return(nil, parentStudioErr) aliasErr := errors.New("error getting aliases") - mockStudioReader.On("GetAliases", studioID).Return([]string{"alias"}, nil).Once() - mockStudioReader.On("GetAliases", noImageID).Return(nil, nil).Once() - mockStudioReader.On("GetAliases", errImageID).Return(nil, nil).Once() - mockStudioReader.On("GetAliases", missingParentStudioID).Return(nil, nil).Once() - mockStudioReader.On("GetAliases", errAliasID).Return(nil, aliasErr).Once() + mockStudioReader.On("GetAliases", ctx, studioID).Return([]string{"alias"}, nil).Once() + mockStudioReader.On("GetAliases", ctx, noImageID).Return(nil, nil).Once() + mockStudioReader.On("GetAliases", ctx, errImageID).Return(nil, nil).Once() + mockStudioReader.On("GetAliases", ctx, missingParentStudioID).Return(nil, nil).Once() + mockStudioReader.On("GetAliases", ctx, errAliasID).Return(nil, aliasErr).Once() - mockStudioReader.On("GetStashIDs", studioID).Return(stashIDs, nil).Once() - mockStudioReader.On("GetStashIDs", noImageID).Return(nil, nil).Once() - mockStudioReader.On("GetStashIDs", missingParentStudioID).Return(stashIDs, nil).Once() + mockStudioReader.On("GetStashIDs", ctx, studioID).Return(stashIDs, nil).Once() + mockStudioReader.On("GetStashIDs", ctx, noImageID).Return(nil, nil).Once() + mockStudioReader.On("GetStashIDs", ctx, missingParentStudioID).Return(stashIDs, nil).Once() for i, s := range scenarios { studio := s.input - json, err := ToJSON(mockStudioReader, &studio) + json, err := ToJSON(ctx, mockStudioReader, &studio) switch { case !s.err && err != nil: diff --git a/pkg/studio/import.go b/pkg/studio/import.go index a44481982..627d81272 100644 --- a/pkg/studio/import.go +++ b/pkg/studio/import.go @@ -1,6 +1,7 @@ package studio import ( + "context" "database/sql" "errors" "fmt" @@ -11,10 +12,19 @@ import ( "github.com/stashapp/stash/pkg/utils" ) +type NameFinderCreatorUpdater interface { + FindByName(ctx context.Context, name string, nocase bool) (*models.Studio, error) + Create(ctx context.Context, newStudio models.Studio) (*models.Studio, error) + UpdateFull(ctx context.Context, updatedStudio models.Studio) (*models.Studio, error) + UpdateImage(ctx context.Context, studioID int, image []byte) error + UpdateAliases(ctx context.Context, studioID int, aliases []string) error + UpdateStashIDs(ctx context.Context, studioID int, stashIDs []models.StashID) error +} + var ErrParentStudioNotExist = errors.New("parent studio does not exist") type Importer struct { - ReaderWriter models.StudioReaderWriter + ReaderWriter NameFinderCreatorUpdater Input jsonschema.Studio MissingRefBehaviour models.ImportMissingRefEnum @@ -22,7 +32,7 @@ type Importer struct { imageData []byte } -func (i *Importer) PreImport() error { +func (i *Importer) PreImport(ctx context.Context) error { checksum := md5.FromString(i.Input.Name) i.studio = models.Studio{ @@ -36,7 +46,7 @@ func (i *Importer) PreImport() error { Rating: sql.NullInt64{Int64: int64(i.Input.Rating), Valid: true}, } - if err := i.populateParentStudio(); err != nil { + if err := i.populateParentStudio(ctx); err != nil { return err } @@ -51,9 +61,9 @@ func (i *Importer) PreImport() error { return nil } -func (i *Importer) populateParentStudio() error { +func (i *Importer) populateParentStudio(ctx context.Context) error { if i.Input.ParentStudio != "" { - studio, err := i.ReaderWriter.FindByName(i.Input.ParentStudio, false) 
+ studio, err := i.ReaderWriter.FindByName(ctx, i.Input.ParentStudio, false) if err != nil { return fmt.Errorf("error finding studio by name: %v", err) } @@ -68,7 +78,7 @@ func (i *Importer) populateParentStudio() error { } if i.MissingRefBehaviour == models.ImportMissingRefEnumCreate { - parentID, err := i.createParentStudio(i.Input.ParentStudio) + parentID, err := i.createParentStudio(ctx, i.Input.ParentStudio) if err != nil { return err } @@ -85,10 +95,10 @@ func (i *Importer) populateParentStudio() error { return nil } -func (i *Importer) createParentStudio(name string) (int, error) { +func (i *Importer) createParentStudio(ctx context.Context, name string) (int, error) { newStudio := *models.NewStudio(name) - created, err := i.ReaderWriter.Create(newStudio) + created, err := i.ReaderWriter.Create(ctx, newStudio) if err != nil { return 0, err } @@ -96,20 +106,20 @@ func (i *Importer) createParentStudio(name string) (int, error) { return created.ID, nil } -func (i *Importer) PostImport(id int) error { +func (i *Importer) PostImport(ctx context.Context, id int) error { if len(i.imageData) > 0 { - if err := i.ReaderWriter.UpdateImage(id, i.imageData); err != nil { + if err := i.ReaderWriter.UpdateImage(ctx, id, i.imageData); err != nil { return fmt.Errorf("error setting studio image: %v", err) } } if len(i.Input.StashIDs) > 0 { - if err := i.ReaderWriter.UpdateStashIDs(id, i.Input.StashIDs); err != nil { + if err := i.ReaderWriter.UpdateStashIDs(ctx, id, i.Input.StashIDs); err != nil { return fmt.Errorf("error setting stash id: %v", err) } } - if err := i.ReaderWriter.UpdateAliases(id, i.Input.Aliases); err != nil { + if err := i.ReaderWriter.UpdateAliases(ctx, id, i.Input.Aliases); err != nil { return fmt.Errorf("error setting tag aliases: %v", err) } @@ -120,9 +130,9 @@ func (i *Importer) Name() string { return i.Input.Name } -func (i *Importer) FindExistingID() (*int, error) { +func (i *Importer) FindExistingID(ctx context.Context) (*int, error) { const nocase = false - existing, err := i.ReaderWriter.FindByName(i.Name(), nocase) + existing, err := i.ReaderWriter.FindByName(ctx, i.Name(), nocase) if err != nil { return nil, err } @@ -135,8 +145,8 @@ func (i *Importer) FindExistingID() (*int, error) { return nil, nil } -func (i *Importer) Create() (*int, error) { - created, err := i.ReaderWriter.Create(i.studio) +func (i *Importer) Create(ctx context.Context) (*int, error) { + created, err := i.ReaderWriter.Create(ctx, i.studio) if err != nil { return nil, fmt.Errorf("error creating studio: %v", err) } @@ -145,10 +155,10 @@ func (i *Importer) Create() (*int, error) { return &id, nil } -func (i *Importer) Update(id int) error { +func (i *Importer) Update(ctx context.Context, id int) error { studio := i.studio studio.ID = id - _, err := i.ReaderWriter.UpdateFull(studio) + _, err := i.ReaderWriter.UpdateFull(ctx, studio) if err != nil { return fmt.Errorf("error updating existing studio: %v", err) } diff --git a/pkg/studio/import_test.go b/pkg/studio/import_test.go index 87b22519b..fc2ae402b 100644 --- a/pkg/studio/import_test.go +++ b/pkg/studio/import_test.go @@ -1,6 +1,7 @@ package studio import ( + "context" "errors" "testing" @@ -43,21 +44,22 @@ func TestImporterPreImport(t *testing.T) { IgnoreAutoTag: autoTagIgnored, }, } + ctx := context.Background() - err := i.PreImport() + err := i.PreImport(ctx) assert.NotNil(t, err) i.Input.Image = image - err = i.PreImport() + err = i.PreImport(ctx) assert.Nil(t, err) i.Input = *createFullJSONStudio(studioName, image, []string{"alias"}) 
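For orientation, the Importer methods changed above are driven stage by stage. The loop below is a hedged reconstruction of that flow from the method set alone; the real driver lives elsewhere in stash and is not shown in this diff:

    // Assumed import orchestration: PreImport, then find-or-create, then PostImport.
    package main

    import "context"

    type importer interface {
    	PreImport(ctx context.Context) error
    	FindExistingID(ctx context.Context) (*int, error)
    	Create(ctx context.Context) (*int, error)
    	Update(ctx context.Context, id int) error
    	PostImport(ctx context.Context, id int) error
    }

    func runImport(ctx context.Context, i importer) error {
    	if err := i.PreImport(ctx); err != nil {
    		return err
    	}
    	id, err := i.FindExistingID(ctx)
    	if err != nil {
    		return err
    	}
    	if id == nil {
    		// nothing existing: create a new record
    		if id, err = i.Create(ctx); err != nil {
    			return err
    		}
    	} else if err := i.Update(ctx, *id); err != nil {
    		// existing record: overwrite in place
    		return err
    	}
    	return i.PostImport(ctx, *id)
    }

    func main() {}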
i.Input.ParentStudio = "" - err = i.PreImport() + err = i.PreImport(ctx) assert.Nil(t, err) expectedStudio := createFullStudio(0, 0) @@ -68,6 +70,7 @@ func TestImporterPreImport(t *testing.T) { func TestImporterPreImportWithParent(t *testing.T) { readerWriter := &mocks.StudioReaderWriter{} + ctx := context.Background() i := Importer{ ReaderWriter: readerWriter, @@ -78,17 +81,17 @@ func TestImporterPreImportWithParent(t *testing.T) { }, } - readerWriter.On("FindByName", existingParentStudioName, false).Return(&models.Studio{ + readerWriter.On("FindByName", ctx, existingParentStudioName, false).Return(&models.Studio{ ID: existingStudioID, }, nil).Once() - readerWriter.On("FindByName", existingParentStudioErr, false).Return(nil, errors.New("FindByName error")).Once() + readerWriter.On("FindByName", ctx, existingParentStudioErr, false).Return(nil, errors.New("FindByName error")).Once() - err := i.PreImport() + err := i.PreImport(ctx) assert.Nil(t, err) assert.Equal(t, int64(existingStudioID), i.studio.ParentID.Int64) i.Input.ParentStudio = existingParentStudioErr - err = i.PreImport() + err = i.PreImport(ctx) assert.NotNil(t, err) readerWriter.AssertExpectations(t) @@ -96,6 +99,7 @@ func TestImporterPreImportWithParent(t *testing.T) { func TestImporterPreImportWithMissingParent(t *testing.T) { readerWriter := &mocks.StudioReaderWriter{} + ctx := context.Background() i := Importer{ ReaderWriter: readerWriter, @@ -107,20 +111,20 @@ func TestImporterPreImportWithMissingParent(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumFail, } - readerWriter.On("FindByName", missingParentStudioName, false).Return(nil, nil).Times(3) - readerWriter.On("Create", mock.AnythingOfType("models.Studio")).Return(&models.Studio{ + readerWriter.On("FindByName", ctx, missingParentStudioName, false).Return(nil, nil).Times(3) + readerWriter.On("Create", ctx, mock.AnythingOfType("models.Studio")).Return(&models.Studio{ ID: existingStudioID, }, nil) - err := i.PreImport() + err := i.PreImport(ctx) assert.NotNil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumIgnore - err = i.PreImport() + err = i.PreImport(ctx) assert.Nil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumCreate - err = i.PreImport() + err = i.PreImport(ctx) assert.Nil(t, err) assert.Equal(t, int64(existingStudioID), i.studio.ParentID.Int64) @@ -129,6 +133,7 @@ func TestImporterPreImportWithMissingParent(t *testing.T) { func TestImporterPreImportWithMissingParentCreateErr(t *testing.T) { readerWriter := &mocks.StudioReaderWriter{} + ctx := context.Background() i := Importer{ ReaderWriter: readerWriter, @@ -140,15 +145,16 @@ func TestImporterPreImportWithMissingParentCreateErr(t *testing.T) { MissingRefBehaviour: models.ImportMissingRefEnumCreate, } - readerWriter.On("FindByName", missingParentStudioName, false).Return(nil, nil).Once() - readerWriter.On("Create", mock.AnythingOfType("models.Studio")).Return(nil, errors.New("Create error")) + readerWriter.On("FindByName", ctx, missingParentStudioName, false).Return(nil, nil).Once() + readerWriter.On("Create", ctx, mock.AnythingOfType("models.Studio")).Return(nil, errors.New("Create error")) - err := i.PreImport() + err := i.PreImport(ctx) assert.NotNil(t, err) } func TestImporterPostImport(t *testing.T) { readerWriter := &mocks.StudioReaderWriter{} + ctx := context.Background() i := Importer{ ReaderWriter: readerWriter, @@ -161,21 +167,21 @@ func TestImporterPostImport(t *testing.T) { updateStudioImageErr := errors.New("UpdateImage error") updateTagAliasErr := 
errors.New("UpdateAlias error") - readerWriter.On("UpdateImage", studioID, imageBytes).Return(nil).Once() - readerWriter.On("UpdateImage", errImageID, imageBytes).Return(updateStudioImageErr).Once() - readerWriter.On("UpdateImage", errAliasID, imageBytes).Return(nil).Once() + readerWriter.On("UpdateImage", ctx, studioID, imageBytes).Return(nil).Once() + readerWriter.On("UpdateImage", ctx, errImageID, imageBytes).Return(updateStudioImageErr).Once() + readerWriter.On("UpdateImage", ctx, errAliasID, imageBytes).Return(nil).Once() - readerWriter.On("UpdateAliases", studioID, i.Input.Aliases).Return(nil).Once() - readerWriter.On("UpdateAliases", errImageID, i.Input.Aliases).Return(nil).Maybe() - readerWriter.On("UpdateAliases", errAliasID, i.Input.Aliases).Return(updateTagAliasErr).Once() + readerWriter.On("UpdateAliases", ctx, studioID, i.Input.Aliases).Return(nil).Once() + readerWriter.On("UpdateAliases", ctx, errImageID, i.Input.Aliases).Return(nil).Maybe() + readerWriter.On("UpdateAliases", ctx, errAliasID, i.Input.Aliases).Return(updateTagAliasErr).Once() - err := i.PostImport(studioID) + err := i.PostImport(ctx, studioID) assert.Nil(t, err) - err = i.PostImport(errImageID) + err = i.PostImport(ctx, errImageID) assert.NotNil(t, err) - err = i.PostImport(errAliasID) + err = i.PostImport(ctx, errAliasID) assert.NotNil(t, err) readerWriter.AssertExpectations(t) @@ -183,6 +189,7 @@ func TestImporterPostImport(t *testing.T) { func TestImporterFindExistingID(t *testing.T) { readerWriter := &mocks.StudioReaderWriter{} + ctx := context.Background() i := Importer{ ReaderWriter: readerWriter, @@ -192,23 +199,23 @@ func TestImporterFindExistingID(t *testing.T) { } errFindByName := errors.New("FindByName error") - readerWriter.On("FindByName", studioName, false).Return(nil, nil).Once() - readerWriter.On("FindByName", existingStudioName, false).Return(&models.Studio{ + readerWriter.On("FindByName", ctx, studioName, false).Return(nil, nil).Once() + readerWriter.On("FindByName", ctx, existingStudioName, false).Return(&models.Studio{ ID: existingStudioID, }, nil).Once() - readerWriter.On("FindByName", studioNameErr, false).Return(nil, errFindByName).Once() + readerWriter.On("FindByName", ctx, studioNameErr, false).Return(nil, errFindByName).Once() - id, err := i.FindExistingID() + id, err := i.FindExistingID(ctx) assert.Nil(t, id) assert.Nil(t, err) i.Input.Name = existingStudioName - id, err = i.FindExistingID() + id, err = i.FindExistingID(ctx) assert.Equal(t, existingStudioID, *id) assert.Nil(t, err) i.Input.Name = studioNameErr - id, err = i.FindExistingID() + id, err = i.FindExistingID(ctx) assert.Nil(t, id) assert.NotNil(t, err) @@ -217,6 +224,7 @@ func TestImporterFindExistingID(t *testing.T) { func TestCreate(t *testing.T) { readerWriter := &mocks.StudioReaderWriter{} + ctx := context.Background() studio := models.Studio{ Name: models.NullString(studioName), @@ -232,17 +240,17 @@ func TestCreate(t *testing.T) { } errCreate := errors.New("Create error") - readerWriter.On("Create", studio).Return(&models.Studio{ + readerWriter.On("Create", ctx, studio).Return(&models.Studio{ ID: studioID, }, nil).Once() - readerWriter.On("Create", studioErr).Return(nil, errCreate).Once() + readerWriter.On("Create", ctx, studioErr).Return(nil, errCreate).Once() - id, err := i.Create() + id, err := i.Create(ctx) assert.Equal(t, studioID, *id) assert.Nil(t, err) i.studio = studioErr - id, err = i.Create() + id, err = i.Create(ctx) assert.Nil(t, id) assert.NotNil(t, err) @@ -251,6 +259,7 @@ func TestCreate(t *testing.T) 
{ func TestUpdate(t *testing.T) { readerWriter := &mocks.StudioReaderWriter{} + ctx := context.Background() studio := models.Studio{ Name: models.NullString(studioName), @@ -269,18 +278,18 @@ func TestUpdate(t *testing.T) { // id needs to be set for the mock input studio.ID = studioID - readerWriter.On("UpdateFull", studio).Return(nil, nil).Once() + readerWriter.On("UpdateFull", ctx, studio).Return(nil, nil).Once() - err := i.Update(studioID) + err := i.Update(ctx, studioID) assert.Nil(t, err) i.studio = studioErr // need to set id separately studioErr.ID = errImageID - readerWriter.On("UpdateFull", studioErr).Return(nil, errUpdate).Once() + readerWriter.On("UpdateFull", ctx, studioErr).Return(nil, errUpdate).Once() - err = i.Update(errImageID) + err = i.Update(ctx, errImageID) assert.NotNil(t, err) readerWriter.AssertExpectations(t) diff --git a/pkg/studio/query.go b/pkg/studio/query.go index 5b2f68896..dee499a1b 100644 --- a/pkg/studio/query.go +++ b/pkg/studio/query.go @@ -1,8 +1,20 @@ package studio -import "github.com/stashapp/stash/pkg/models" +import ( + "context" -func ByName(qb models.StudioReader, name string) (*models.Studio, error) { + "github.com/stashapp/stash/pkg/models" +) + +type Finder interface { + Find(ctx context.Context, id int) (*models.Studio, error) +} + +type Queryer interface { + Query(ctx context.Context, studioFilter *models.StudioFilterType, findFilter *models.FindFilterType) ([]*models.Studio, int, error) +} + +func ByName(ctx context.Context, qb Queryer, name string) (*models.Studio, error) { f := &models.StudioFilterType{ Name: &models.StringCriterionInput{ Value: name, @@ -11,7 +23,7 @@ func ByName(qb models.StudioReader, name string) (*models.Studio, error) { } pp := 1 - ret, count, err := qb.Query(f, &models.FindFilterType{ + ret, count, err := qb.Query(ctx, f, &models.FindFilterType{ PerPage: &pp, }) @@ -26,7 +38,7 @@ func ByName(qb models.StudioReader, name string) (*models.Studio, error) { return nil, nil } -func ByAlias(qb models.StudioReader, alias string) (*models.Studio, error) { +func ByAlias(ctx context.Context, qb Queryer, alias string) (*models.Studio, error) { f := &models.StudioFilterType{ Aliases: &models.StringCriterionInput{ Value: alias, @@ -35,7 +47,7 @@ func ByAlias(qb models.StudioReader, alias string) (*models.Studio, error) { } pp := 1 - ret, count, err := qb.Query(f, &models.FindFilterType{ + ret, count, err := qb.Query(ctx, f, &models.FindFilterType{ PerPage: &pp, }) diff --git a/pkg/studio/update.go b/pkg/studio/update.go index 35a655a73..addae5c94 100644 --- a/pkg/studio/update.go +++ b/pkg/studio/update.go @@ -1,11 +1,17 @@ package studio import ( + "context" "fmt" "github.com/stashapp/stash/pkg/models" ) +type NameFinderCreator interface { + FindByName(ctx context.Context, name string, nocase bool) (*models.Studio, error) + Create(ctx context.Context, newStudio models.Studio) (*models.Studio, error) +} + type NameExistsError struct { Name string } @@ -25,9 +31,9 @@ func (e *NameUsedByAliasError) Error() string { // EnsureStudioNameUnique returns an error if the studio name provided // is used as a name or alias of another existing tag. 
-func EnsureStudioNameUnique(id int, name string, qb models.StudioReader) error { +func EnsureStudioNameUnique(ctx context.Context, id int, name string, qb Queryer) error { // ensure name is unique - sameNameStudio, err := ByName(qb, name) + sameNameStudio, err := ByName(ctx, qb, name) if err != nil { return err } @@ -39,7 +45,7 @@ func EnsureStudioNameUnique(id int, name string, qb models.StudioReader) error { } // query by alias - sameNameStudio, err = ByAlias(qb, name) + sameNameStudio, err = ByAlias(ctx, qb, name) if err != nil { return err } @@ -54,9 +60,9 @@ func EnsureStudioNameUnique(id int, name string, qb models.StudioReader) error { return nil } -func EnsureAliasesUnique(id int, aliases []string, qb models.StudioReader) error { +func EnsureAliasesUnique(ctx context.Context, id int, aliases []string, qb Queryer) error { for _, a := range aliases { - if err := EnsureStudioNameUnique(id, a, qb); err != nil { + if err := EnsureStudioNameUnique(ctx, id, a, qb); err != nil { return err } } diff --git a/pkg/tag/export.go b/pkg/tag/export.go index e70392379..20c1b4adc 100644 --- a/pkg/tag/export.go +++ b/pkg/tag/export.go @@ -1,6 +1,7 @@ package tag import ( + "context" "fmt" "github.com/stashapp/stash/pkg/models" @@ -9,8 +10,14 @@ import ( "github.com/stashapp/stash/pkg/utils" ) +type FinderAliasImageGetter interface { + GetAliases(ctx context.Context, tagID int) ([]string, error) + GetImage(ctx context.Context, tagID int) ([]byte, error) + FindByChildTagID(ctx context.Context, childID int) ([]*models.Tag, error) +} + // ToJSON converts a Tag object into its JSON equivalent. -func ToJSON(reader models.TagReader, tag *models.Tag) (*jsonschema.Tag, error) { +func ToJSON(ctx context.Context, reader FinderAliasImageGetter, tag *models.Tag) (*jsonschema.Tag, error) { newTagJSON := jsonschema.Tag{ Name: tag.Name, IgnoreAutoTag: tag.IgnoreAutoTag, @@ -18,14 +25,14 @@ func ToJSON(reader models.TagReader, tag *models.Tag) (*jsonschema.Tag, error) { UpdatedAt: json.JSONTime{Time: tag.UpdatedAt.Timestamp}, } - aliases, err := reader.GetAliases(tag.ID) + aliases, err := reader.GetAliases(ctx, tag.ID) if err != nil { return nil, fmt.Errorf("error getting tag aliases: %v", err) } newTagJSON.Aliases = aliases - image, err := reader.GetImage(tag.ID) + image, err := reader.GetImage(ctx, tag.ID) if err != nil { return nil, fmt.Errorf("error getting tag image: %v", err) } @@ -34,7 +41,7 @@ func ToJSON(reader models.TagReader, tag *models.Tag) (*jsonschema.Tag, error) { newTagJSON.Image = utils.GetBase64StringFromData(image) } - parents, err := reader.FindByChildTagID(tag.ID) + parents, err := reader.FindByChildTagID(ctx, tag.ID) if err != nil { return nil, fmt.Errorf("error getting parents: %v", err) } diff --git a/pkg/tag/export_test.go b/pkg/tag/export_test.go index 930c0fdb1..255c940dd 100644 --- a/pkg/tag/export_test.go +++ b/pkg/tag/export_test.go @@ -1,6 +1,7 @@ package tag import ( + "context" "errors" "github.com/stashapp/stash/pkg/models" @@ -106,33 +107,34 @@ func initTestTable() { func TestToJSON(t *testing.T) { initTestTable() + ctx := context.Background() mockTagReader := &mocks.TagReaderWriter{} imageErr := errors.New("error getting image") aliasErr := errors.New("error getting aliases") parentsErr := errors.New("error getting parents") - mockTagReader.On("GetAliases", tagID).Return([]string{"alias"}, nil).Once() - mockTagReader.On("GetAliases", noImageID).Return(nil, nil).Once() - mockTagReader.On("GetAliases", errImageID).Return(nil, nil).Once() - mockTagReader.On("GetAliases",
errAliasID).Return(nil, aliasErr).Once() - mockTagReader.On("GetAliases", withParentsID).Return(nil, nil).Once() - mockTagReader.On("GetAliases", errParentsID).Return(nil, nil).Once() + mockTagReader.On("GetAliases", ctx, tagID).Return([]string{"alias"}, nil).Once() + mockTagReader.On("GetAliases", ctx, noImageID).Return(nil, nil).Once() + mockTagReader.On("GetAliases", ctx, errImageID).Return(nil, nil).Once() + mockTagReader.On("GetAliases", ctx, errAliasID).Return(nil, aliasErr).Once() + mockTagReader.On("GetAliases", ctx, withParentsID).Return(nil, nil).Once() + mockTagReader.On("GetAliases", ctx, errParentsID).Return(nil, nil).Once() - mockTagReader.On("GetImage", tagID).Return(imageBytes, nil).Once() - mockTagReader.On("GetImage", noImageID).Return(nil, nil).Once() - mockTagReader.On("GetImage", errImageID).Return(nil, imageErr).Once() - mockTagReader.On("GetImage", withParentsID).Return(imageBytes, nil).Once() - mockTagReader.On("GetImage", errParentsID).Return(nil, nil).Once() + mockTagReader.On("GetImage", ctx, tagID).Return(imageBytes, nil).Once() + mockTagReader.On("GetImage", ctx, noImageID).Return(nil, nil).Once() + mockTagReader.On("GetImage", ctx, errImageID).Return(nil, imageErr).Once() + mockTagReader.On("GetImage", ctx, withParentsID).Return(imageBytes, nil).Once() + mockTagReader.On("GetImage", ctx, errParentsID).Return(nil, nil).Once() - mockTagReader.On("FindByChildTagID", tagID).Return(nil, nil).Once() - mockTagReader.On("FindByChildTagID", noImageID).Return(nil, nil).Once() - mockTagReader.On("FindByChildTagID", withParentsID).Return([]*models.Tag{{Name: "parent"}}, nil).Once() - mockTagReader.On("FindByChildTagID", errParentsID).Return(nil, parentsErr).Once() + mockTagReader.On("FindByChildTagID", ctx, tagID).Return(nil, nil).Once() + mockTagReader.On("FindByChildTagID", ctx, noImageID).Return(nil, nil).Once() + mockTagReader.On("FindByChildTagID", ctx, withParentsID).Return([]*models.Tag{{Name: "parent"}}, nil).Once() + mockTagReader.On("FindByChildTagID", ctx, errParentsID).Return(nil, parentsErr).Once() for i, s := range scenarios { tag := s.tag - json, err := ToJSON(mockTagReader, &tag) + json, err := ToJSON(ctx, mockTagReader, &tag) switch { case !s.err && err != nil: diff --git a/pkg/tag/import.go b/pkg/tag/import.go index 66028946c..937ea2359 100644 --- a/pkg/tag/import.go +++ b/pkg/tag/import.go @@ -1,6 +1,7 @@ package tag import ( + "context" "fmt" "github.com/stashapp/stash/pkg/models" @@ -8,6 +9,15 @@ import ( "github.com/stashapp/stash/pkg/utils" ) +type NameFinderCreatorUpdater interface { + FindByName(ctx context.Context, name string, nocase bool) (*models.Tag, error) + Create(ctx context.Context, newTag models.Tag) (*models.Tag, error) + UpdateFull(ctx context.Context, updatedTag models.Tag) (*models.Tag, error) + UpdateImage(ctx context.Context, tagID int, image []byte) error + UpdateAliases(ctx context.Context, tagID int, aliases []string) error + UpdateParentTags(ctx context.Context, tagID int, parentIDs []int) error +} + type ParentTagNotExistError struct { missingParent string } @@ -21,7 +31,7 @@ func (e ParentTagNotExistError) MissingParent() string { } type Importer struct { - ReaderWriter models.TagReaderWriter + ReaderWriter NameFinderCreatorUpdater Input jsonschema.Tag MissingRefBehaviour models.ImportMissingRefEnum @@ -29,7 +39,7 @@ type Importer struct { imageData []byte } -func (i *Importer) PreImport() error { +func (i *Importer) PreImport(ctx context.Context) error { i.tag = models.Tag{ Name: i.Input.Name, IgnoreAutoTag: 
i.Input.IgnoreAutoTag, @@ -48,23 +58,23 @@ func (i *Importer) PreImport() error { return nil } -func (i *Importer) PostImport(id int) error { +func (i *Importer) PostImport(ctx context.Context, id int) error { if len(i.imageData) > 0 { - if err := i.ReaderWriter.UpdateImage(id, i.imageData); err != nil { + if err := i.ReaderWriter.UpdateImage(ctx, id, i.imageData); err != nil { return fmt.Errorf("error setting tag image: %v", err) } } - if err := i.ReaderWriter.UpdateAliases(id, i.Input.Aliases); err != nil { + if err := i.ReaderWriter.UpdateAliases(ctx, id, i.Input.Aliases); err != nil { return fmt.Errorf("error setting tag aliases: %v", err) } - parents, err := i.getParents() + parents, err := i.getParents(ctx) if err != nil { return err } - if err := i.ReaderWriter.UpdateParentTags(id, parents); err != nil { + if err := i.ReaderWriter.UpdateParentTags(ctx, id, parents); err != nil { return fmt.Errorf("error setting parents: %v", err) } @@ -75,9 +85,9 @@ func (i *Importer) Name() string { return i.Input.Name } -func (i *Importer) FindExistingID() (*int, error) { +func (i *Importer) FindExistingID(ctx context.Context) (*int, error) { const nocase = false - existing, err := i.ReaderWriter.FindByName(i.Name(), nocase) + existing, err := i.ReaderWriter.FindByName(ctx, i.Name(), nocase) if err != nil { return nil, err } @@ -90,8 +100,8 @@ func (i *Importer) FindExistingID() (*int, error) { return nil, nil } -func (i *Importer) Create() (*int, error) { - created, err := i.ReaderWriter.Create(i.tag) +func (i *Importer) Create(ctx context.Context) (*int, error) { + created, err := i.ReaderWriter.Create(ctx, i.tag) if err != nil { return nil, fmt.Errorf("error creating tag: %v", err) } @@ -100,10 +110,10 @@ func (i *Importer) Create() (*int, error) { return &id, nil } -func (i *Importer) Update(id int) error { +func (i *Importer) Update(ctx context.Context, id int) error { tag := i.tag tag.ID = id - _, err := i.ReaderWriter.UpdateFull(tag) + _, err := i.ReaderWriter.UpdateFull(ctx, tag) if err != nil { return fmt.Errorf("error updating existing tag: %v", err) } @@ -111,10 +121,10 @@ func (i *Importer) Update(id int) error { return nil } -func (i *Importer) getParents() ([]int, error) { +func (i *Importer) getParents(ctx context.Context) ([]int, error) { var parents []int for _, parent := range i.Input.Parents { - tag, err := i.ReaderWriter.FindByName(parent, false) + tag, err := i.ReaderWriter.FindByName(ctx, parent, false) if err != nil { return nil, fmt.Errorf("error finding parent by name: %v", err) } @@ -129,7 +139,7 @@ func (i *Importer) getParents() ([]int, error) { } if i.MissingRefBehaviour == models.ImportMissingRefEnumCreate { - parentID, err := i.createParent(parent) + parentID, err := i.createParent(ctx, parent) if err != nil { return nil, err } @@ -143,10 +153,10 @@ func (i *Importer) getParents() ([]int, error) { return parents, nil } -func (i *Importer) createParent(name string) (int, error) { +func (i *Importer) createParent(ctx context.Context, name string) (int, error) { newTag := *models.NewTag(name) - created, err := i.ReaderWriter.Create(newTag) + created, err := i.ReaderWriter.Create(ctx, newTag) if err != nil { return 0, err } diff --git a/pkg/tag/import_test.go b/pkg/tag/import_test.go index fb6f3c58f..e4fb3ce8d 100644 --- a/pkg/tag/import_test.go +++ b/pkg/tag/import_test.go @@ -1,6 +1,7 @@ package tag import ( + "context" "errors" "testing" @@ -23,6 +24,8 @@ const ( existingTagID = 100 ) +var testCtx = context.Background() + func TestImporterName(t *testing.T) { i := 
Importer{ Input: jsonschema.Tag{ @@ -42,13 +45,13 @@ func TestImporterPreImport(t *testing.T) { }, } - err := i.PreImport() + err := i.PreImport(testCtx) assert.NotNil(t, err) i.Input.Image = image - err = i.PreImport() + err = i.PreImport(testCtx) assert.Nil(t, err) } @@ -68,38 +71,38 @@ func TestImporterPostImport(t *testing.T) { updateTagAliasErr := errors.New("UpdateAlias error") updateTagParentsErr := errors.New("UpdateParentTags error") - readerWriter.On("UpdateAliases", tagID, i.Input.Aliases).Return(nil).Once() - readerWriter.On("UpdateAliases", errAliasID, i.Input.Aliases).Return(updateTagAliasErr).Once() - readerWriter.On("UpdateAliases", withParentsID, i.Input.Aliases).Return(nil).Once() - readerWriter.On("UpdateAliases", errParentsID, i.Input.Aliases).Return(nil).Once() + readerWriter.On("UpdateAliases", testCtx, tagID, i.Input.Aliases).Return(nil).Once() + readerWriter.On("UpdateAliases", testCtx, errAliasID, i.Input.Aliases).Return(updateTagAliasErr).Once() + readerWriter.On("UpdateAliases", testCtx, withParentsID, i.Input.Aliases).Return(nil).Once() + readerWriter.On("UpdateAliases", testCtx, errParentsID, i.Input.Aliases).Return(nil).Once() - readerWriter.On("UpdateImage", tagID, imageBytes).Return(nil).Once() - readerWriter.On("UpdateImage", errAliasID, imageBytes).Return(nil).Once() - readerWriter.On("UpdateImage", errImageID, imageBytes).Return(updateTagImageErr).Once() - readerWriter.On("UpdateImage", withParentsID, imageBytes).Return(nil).Once() - readerWriter.On("UpdateImage", errParentsID, imageBytes).Return(nil).Once() + readerWriter.On("UpdateImage", testCtx, tagID, imageBytes).Return(nil).Once() + readerWriter.On("UpdateImage", testCtx, errAliasID, imageBytes).Return(nil).Once() + readerWriter.On("UpdateImage", testCtx, errImageID, imageBytes).Return(updateTagImageErr).Once() + readerWriter.On("UpdateImage", testCtx, withParentsID, imageBytes).Return(nil).Once() + readerWriter.On("UpdateImage", testCtx, errParentsID, imageBytes).Return(nil).Once() var parentTags []int - readerWriter.On("UpdateParentTags", tagID, parentTags).Return(nil).Once() - readerWriter.On("UpdateParentTags", withParentsID, []int{100}).Return(nil).Once() - readerWriter.On("UpdateParentTags", errParentsID, []int{100}).Return(updateTagParentsErr).Once() + readerWriter.On("UpdateParentTags", testCtx, tagID, parentTags).Return(nil).Once() + readerWriter.On("UpdateParentTags", testCtx, withParentsID, []int{100}).Return(nil).Once() + readerWriter.On("UpdateParentTags", testCtx, errParentsID, []int{100}).Return(updateTagParentsErr).Once() - readerWriter.On("FindByName", "Parent", false).Return(&models.Tag{ID: 100}, nil) + readerWriter.On("FindByName", testCtx, "Parent", false).Return(&models.Tag{ID: 100}, nil) - err := i.PostImport(tagID) + err := i.PostImport(testCtx, tagID) assert.Nil(t, err) - err = i.PostImport(errImageID) + err = i.PostImport(testCtx, errImageID) assert.NotNil(t, err) - err = i.PostImport(errAliasID) + err = i.PostImport(testCtx, errAliasID) assert.NotNil(t, err) i.Input.Parents = []string{"Parent"} - err = i.PostImport(withParentsID) + err = i.PostImport(testCtx, withParentsID) assert.Nil(t, err) - err = i.PostImport(errParentsID) + err = i.PostImport(testCtx, errParentsID) assert.NotNil(t, err) readerWriter.AssertExpectations(t) @@ -129,70 +132,70 @@ func TestImporterPostImportParentMissing(t *testing.T) { var emptyParents []int - readerWriter.On("UpdateImage", mock.Anything, mock.Anything).Return(nil) - readerWriter.On("UpdateAliases", mock.Anything, 
mock.Anything).Return(nil) + readerWriter.On("UpdateImage", testCtx, mock.Anything, mock.Anything).Return(nil) + readerWriter.On("UpdateAliases", testCtx, mock.Anything, mock.Anything).Return(nil) - readerWriter.On("FindByName", "Create", false).Return(nil, nil).Once() - readerWriter.On("FindByName", "CreateError", false).Return(nil, nil).Once() - readerWriter.On("FindByName", "CreateFindError", false).Return(nil, findError).Once() - readerWriter.On("FindByName", "CreateFound", false).Return(&models.Tag{ID: 101}, nil).Once() - readerWriter.On("FindByName", "Fail", false).Return(nil, nil).Once() - readerWriter.On("FindByName", "FailFindError", false).Return(nil, findError) - readerWriter.On("FindByName", "FailFound", false).Return(&models.Tag{ID: 102}, nil).Once() - readerWriter.On("FindByName", "Ignore", false).Return(nil, nil).Once() - readerWriter.On("FindByName", "IgnoreFindError", false).Return(nil, findError) - readerWriter.On("FindByName", "IgnoreFound", false).Return(&models.Tag{ID: 103}, nil).Once() + readerWriter.On("FindByName", testCtx, "Create", false).Return(nil, nil).Once() + readerWriter.On("FindByName", testCtx, "CreateError", false).Return(nil, nil).Once() + readerWriter.On("FindByName", testCtx, "CreateFindError", false).Return(nil, findError).Once() + readerWriter.On("FindByName", testCtx, "CreateFound", false).Return(&models.Tag{ID: 101}, nil).Once() + readerWriter.On("FindByName", testCtx, "Fail", false).Return(nil, nil).Once() + readerWriter.On("FindByName", testCtx, "FailFindError", false).Return(nil, findError) + readerWriter.On("FindByName", testCtx, "FailFound", false).Return(&models.Tag{ID: 102}, nil).Once() + readerWriter.On("FindByName", testCtx, "Ignore", false).Return(nil, nil).Once() + readerWriter.On("FindByName", testCtx, "IgnoreFindError", false).Return(nil, findError) + readerWriter.On("FindByName", testCtx, "IgnoreFound", false).Return(&models.Tag{ID: 103}, nil).Once() - readerWriter.On("UpdateParentTags", createID, []int{100}).Return(nil).Once() - readerWriter.On("UpdateParentTags", createFoundID, []int{101}).Return(nil).Once() - readerWriter.On("UpdateParentTags", failFoundID, []int{102}).Return(nil).Once() - readerWriter.On("UpdateParentTags", ignoreID, emptyParents).Return(nil).Once() - readerWriter.On("UpdateParentTags", ignoreFoundID, []int{103}).Return(nil).Once() + readerWriter.On("UpdateParentTags", testCtx, createID, []int{100}).Return(nil).Once() + readerWriter.On("UpdateParentTags", testCtx, createFoundID, []int{101}).Return(nil).Once() + readerWriter.On("UpdateParentTags", testCtx, failFoundID, []int{102}).Return(nil).Once() + readerWriter.On("UpdateParentTags", testCtx, ignoreID, emptyParents).Return(nil).Once() + readerWriter.On("UpdateParentTags", testCtx, ignoreFoundID, []int{103}).Return(nil).Once() - readerWriter.On("Create", mock.MatchedBy(func(t models.Tag) bool { return t.Name == "Create" })).Return(&models.Tag{ID: 100}, nil).Once() - readerWriter.On("Create", mock.MatchedBy(func(t models.Tag) bool { return t.Name == "CreateError" })).Return(nil, errors.New("failed creating parent")).Once() + readerWriter.On("Create", testCtx, mock.MatchedBy(func(t models.Tag) bool { return t.Name == "Create" })).Return(&models.Tag{ID: 100}, nil).Once() + readerWriter.On("Create", testCtx, mock.MatchedBy(func(t models.Tag) bool { return t.Name == "CreateError" })).Return(nil, errors.New("failed creating parent")).Once() i.MissingRefBehaviour = models.ImportMissingRefEnumCreate i.Input.Parents = []string{"Create"} - err := i.PostImport(createID) + 
err := i.PostImport(testCtx, createID) assert.Nil(t, err) i.Input.Parents = []string{"CreateError"} - err = i.PostImport(createErrorID) + err = i.PostImport(testCtx, createErrorID) assert.NotNil(t, err) i.Input.Parents = []string{"CreateFindError"} - err = i.PostImport(createFindErrorID) + err = i.PostImport(testCtx, createFindErrorID) assert.NotNil(t, err) i.Input.Parents = []string{"CreateFound"} - err = i.PostImport(createFoundID) + err = i.PostImport(testCtx, createFoundID) assert.Nil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumFail i.Input.Parents = []string{"Fail"} - err = i.PostImport(failID) + err = i.PostImport(testCtx, failID) assert.NotNil(t, err) i.Input.Parents = []string{"FailFindError"} - err = i.PostImport(failFindErrorID) + err = i.PostImport(testCtx, failFindErrorID) assert.NotNil(t, err) i.Input.Parents = []string{"FailFound"} - err = i.PostImport(failFoundID) + err = i.PostImport(testCtx, failFoundID) assert.Nil(t, err) i.MissingRefBehaviour = models.ImportMissingRefEnumIgnore i.Input.Parents = []string{"Ignore"} - err = i.PostImport(ignoreID) + err = i.PostImport(testCtx, ignoreID) assert.Nil(t, err) i.Input.Parents = []string{"IgnoreFindError"} - err = i.PostImport(ignoreFindErrorID) + err = i.PostImport(testCtx, ignoreFindErrorID) assert.NotNil(t, err) i.Input.Parents = []string{"IgnoreFound"} - err = i.PostImport(ignoreFoundID) + err = i.PostImport(testCtx, ignoreFoundID) assert.Nil(t, err) readerWriter.AssertExpectations(t) @@ -209,23 +212,23 @@ func TestImporterFindExistingID(t *testing.T) { } errFindByName := errors.New("FindByName error") - readerWriter.On("FindByName", tagName, false).Return(nil, nil).Once() - readerWriter.On("FindByName", existingTagName, false).Return(&models.Tag{ + readerWriter.On("FindByName", testCtx, tagName, false).Return(nil, nil).Once() + readerWriter.On("FindByName", testCtx, existingTagName, false).Return(&models.Tag{ ID: existingTagID, }, nil).Once() - readerWriter.On("FindByName", tagNameErr, false).Return(nil, errFindByName).Once() + readerWriter.On("FindByName", testCtx, tagNameErr, false).Return(nil, errFindByName).Once() - id, err := i.FindExistingID() + id, err := i.FindExistingID(testCtx) assert.Nil(t, id) assert.Nil(t, err) i.Input.Name = existingTagName - id, err = i.FindExistingID() + id, err = i.FindExistingID(testCtx) assert.Equal(t, existingTagID, *id) assert.Nil(t, err) i.Input.Name = tagNameErr - id, err = i.FindExistingID() + id, err = i.FindExistingID(testCtx) assert.Nil(t, id) assert.NotNil(t, err) @@ -249,17 +252,17 @@ func TestCreate(t *testing.T) { } errCreate := errors.New("Create error") - readerWriter.On("Create", tag).Return(&models.Tag{ + readerWriter.On("Create", testCtx, tag).Return(&models.Tag{ ID: tagID, }, nil).Once() - readerWriter.On("Create", tagErr).Return(nil, errCreate).Once() + readerWriter.On("Create", testCtx, tagErr).Return(nil, errCreate).Once() - id, err := i.Create() + id, err := i.Create(testCtx) assert.Equal(t, tagID, *id) assert.Nil(t, err) i.tag = tagErr - id, err = i.Create() + id, err = i.Create(testCtx) assert.Nil(t, id) assert.NotNil(t, err) @@ -286,18 +289,18 @@ func TestUpdate(t *testing.T) { // id needs to be set for the mock input tag.ID = tagID - readerWriter.On("UpdateFull", tag).Return(nil, nil).Once() + readerWriter.On("UpdateFull", testCtx, tag).Return(nil, nil).Once() - err := i.Update(tagID) + err := i.Update(testCtx, tagID) assert.Nil(t, err) i.tag = tagErr // need to set id separately tagErr.ID = errImageID - readerWriter.On("UpdateFull", 
tagErr).Return(nil, errUpdate).Once() + readerWriter.On("UpdateFull", testCtx, tagErr).Return(nil, errUpdate).Once() - err = i.Update(errImageID) + err = i.Update(testCtx, errImageID) assert.NotNil(t, err) readerWriter.AssertExpectations(t) diff --git a/pkg/tag/query.go b/pkg/tag/query.go index ce7406403..a048054d7 100644 --- a/pkg/tag/query.go +++ b/pkg/tag/query.go @@ -1,8 +1,20 @@ package tag -import "github.com/stashapp/stash/pkg/models" +import ( + "context" -func ByName(qb models.TagReader, name string) (*models.Tag, error) { + "github.com/stashapp/stash/pkg/models" +) + +type Finder interface { + Find(ctx context.Context, id int) (*models.Tag, error) +} + +type Queryer interface { + Query(ctx context.Context, tagFilter *models.TagFilterType, findFilter *models.FindFilterType) ([]*models.Tag, int, error) +} + +func ByName(ctx context.Context, qb Queryer, name string) (*models.Tag, error) { f := &models.TagFilterType{ Name: &models.StringCriterionInput{ Value: name, @@ -11,7 +23,7 @@ func ByName(qb models.TagReader, name string) (*models.Tag, error) { } pp := 1 - ret, count, err := qb.Query(f, &models.FindFilterType{ + ret, count, err := qb.Query(ctx, f, &models.FindFilterType{ PerPage: &pp, }) @@ -26,7 +38,7 @@ func ByName(qb models.TagReader, name string) (*models.Tag, error) { return nil, nil } -func ByAlias(qb models.TagReader, alias string) (*models.Tag, error) { +func ByAlias(ctx context.Context, qb Queryer, alias string) (*models.Tag, error) { f := &models.TagFilterType{ Aliases: &models.StringCriterionInput{ Value: alias, @@ -35,7 +47,7 @@ func ByAlias(qb models.TagReader, alias string) (*models.Tag, error) { } pp := 1 - ret, count, err := qb.Query(f, &models.FindFilterType{ + ret, count, err := qb.Query(ctx, f, &models.FindFilterType{ PerPage: &pp, }) diff --git a/pkg/tag/update.go b/pkg/tag/update.go index dfee55154..0c219b26c 100644 --- a/pkg/tag/update.go +++ b/pkg/tag/update.go @@ -1,11 +1,17 @@ package tag import ( + "context" "fmt" "github.com/stashapp/stash/pkg/models" ) +type NameFinderCreator interface { + FindByNames(ctx context.Context, names []string, nocase bool) ([]*models.Tag, error) + Create(ctx context.Context, newTag models.Tag) (*models.Tag, error) +} + type NameExistsError struct { Name string } @@ -37,9 +43,9 @@ func (e *InvalidTagHierarchyError) Error() string { // EnsureTagNameUnique returns an error if the tag name provided // is used as a name or alias of another existing tag. 
-func EnsureTagNameUnique(id int, name string, qb models.TagReader) error { +func EnsureTagNameUnique(ctx context.Context, id int, name string, qb Queryer) error { // ensure name is unique - sameNameTag, err := ByName(qb, name) + sameNameTag, err := ByName(ctx, qb, name) if err != nil { return err } @@ -51,7 +57,7 @@ func EnsureTagNameUnique(id int, name string, qb models.TagReader) error { } // query by alias - sameNameTag, err = ByAlias(qb, name) + sameNameTag, err = ByAlias(ctx, qb, name) if err != nil { return err } @@ -66,9 +72,9 @@ func EnsureTagNameUnique(id int, name string, qb models.TagReader) error { return nil } -func EnsureAliasesUnique(id int, aliases []string, qb models.TagReader) error { +func EnsureAliasesUnique(ctx context.Context, id int, aliases []string, qb Queryer) error { for _, a := range aliases { - if err := EnsureTagNameUnique(id, a, qb); err != nil { + if err := EnsureTagNameUnique(ctx, id, a, qb); err != nil { return err } } @@ -76,12 +82,19 @@ func EnsureAliasesUnique(id int, aliases []string, qb models.TagReader) error { return nil } -func ValidateHierarchy(tag *models.Tag, parentIDs, childIDs []int, qb models.TagReader) error { +type RelationshipGetter interface { + FindAllAncestors(ctx context.Context, tagID int, excludeIDs []int) ([]*models.TagPath, error) + FindAllDescendants(ctx context.Context, tagID int, excludeIDs []int) ([]*models.TagPath, error) + FindByChildTagID(ctx context.Context, childID int) ([]*models.Tag, error) + FindByParentTagID(ctx context.Context, parentID int) ([]*models.Tag, error) +} + +func ValidateHierarchy(ctx context.Context, tag *models.Tag, parentIDs, childIDs []int, qb RelationshipGetter) error { id := tag.ID allAncestors := make(map[int]*models.TagPath) allDescendants := make(map[int]*models.TagPath) - parentsAncestors, err := qb.FindAllAncestors(id, nil) + parentsAncestors, err := qb.FindAllAncestors(ctx, id, nil) if err != nil { return err } @@ -90,7 +103,7 @@ func ValidateHierarchy(tag *models.Tag, parentIDs, childIDs []int, qb models.Tag allAncestors[ancestorTag.ID] = ancestorTag } - childsDescendants, err := qb.FindAllDescendants(id, nil) + childsDescendants, err := qb.FindAllDescendants(ctx, id, nil) if err != nil { return err } @@ -128,7 +141,7 @@ func ValidateHierarchy(tag *models.Tag, parentIDs, childIDs []int, qb models.Tag } if parentIDs == nil { - parentTags, err := qb.FindByChildTagID(id) + parentTags, err := qb.FindByChildTagID(ctx, id) if err != nil { return err } @@ -139,7 +152,7 @@ func ValidateHierarchy(tag *models.Tag, parentIDs, childIDs []int, qb models.Tag } if childIDs == nil { - childTags, err := qb.FindByParentTagID(id) + childTags, err := qb.FindByParentTagID(ctx, id) if err != nil { return err } @@ -164,7 +177,7 @@ func ValidateHierarchy(tag *models.Tag, parentIDs, childIDs []int, qb models.Tag return nil } -func MergeHierarchy(destination int, sources []int, qb models.TagReader) ([]int, []int, error) { +func MergeHierarchy(ctx context.Context, destination int, sources []int, qb RelationshipGetter) ([]int, []int, error) { var mergedParents, mergedChildren []int allIds := append([]int{destination}, sources...) 
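The changes to pkg/studio and pkg/tag above all apply one mechanical pattern: every persistence method gains a leading context.Context, and the broad models.StudioReader/models.TagReader dependencies are replaced by small, consumer-defined interfaces (Finder, Queryer, NameFinderCreator, RelationshipGetter, NameFinderCreatorUpdater) declared next to the code that uses them, so tests mock only the methods actually called. A minimal sketch of the shape, with a hypothetical Widget model standing in for the real ones:

```go
package widget

import (
	"context"
	"fmt"
)

// Widget stands in for a model type such as models.Tag or models.Studio.
type Widget struct {
	ID   int
	Name string
}

// Queryer is consumer-defined: it names only the one store method this
// file needs, so any store type with a matching method satisfies it
// implicitly, and mocks stay one method wide.
type Queryer interface {
	FindByName(ctx context.Context, name string, nocase bool) (*Widget, error)
}

// EnsureNameUnique mirrors the post-refactor shape of EnsureTagNameUnique
// and EnsureStudioNameUnique: context first, then the domain arguments,
// then the narrow interface.
func EnsureNameUnique(ctx context.Context, id int, name string, qb Queryer) error {
	existing, err := qb.FindByName(ctx, name, false)
	if err != nil {
		return err
	}
	if existing != nil && existing.ID != id {
		return fmt.Errorf("name %q is already used by widget %d", name, existing.ID)
	}
	return nil
}
```

Because Go interfaces are satisfied implicitly, the concrete sqlite stores need no edits to conform to each narrowed interface; only the call sites and the generated mocks change, which is why the bulk of this diff is signature updates.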
@@ -192,14 +205,14 @@ func MergeHierarchy(destination int, sources []int, qb models.TagReader) ([]int, } for _, id := range allIds { - parents, err := qb.FindByChildTagID(id) + parents, err := qb.FindByChildTagID(ctx, id) if err != nil { return nil, nil, err } mergedParents = addTo(mergedParents, parents) - children, err := qb.FindByParentTagID(id) + children, err := qb.FindByParentTagID(ctx, id) if err != nil { return nil, nil, err } diff --git a/pkg/tag/update_test.go b/pkg/tag/update_test.go index f7338da23..4cc14e961 100644 --- a/pkg/tag/update_test.go +++ b/pkg/tag/update_test.go @@ -1,6 +1,7 @@ package tag import ( + "context" "fmt" "testing" @@ -219,6 +220,7 @@ func TestEnsureHierarchy(t *testing.T) { func testEnsureHierarchy(t *testing.T, tc testUniqueHierarchyCase, queryParents, queryChildren bool) { mockTagReader := &mocks.TagReaderWriter{} + ctx := context.Background() var parentIDs, childIDs []int find := make(map[int]*models.Tag) @@ -245,33 +247,33 @@ func testEnsureHierarchy(t *testing.T, tc testUniqueHierarchyCase, queryParents, if queryParents { parentIDs = nil - mockTagReader.On("FindByChildTagID", tc.id).Return(tc.parents, nil).Once() + mockTagReader.On("FindByChildTagID", ctx, tc.id).Return(tc.parents, nil).Once() } if queryChildren { childIDs = nil - mockTagReader.On("FindByParentTagID", tc.id).Return(tc.children, nil).Once() + mockTagReader.On("FindByParentTagID", ctx, tc.id).Return(tc.children, nil).Once() } - mockTagReader.On("FindAllAncestors", mock.AnythingOfType("int"), []int(nil)).Return(func(tagID int, excludeIDs []int) []*models.TagPath { + mockTagReader.On("FindAllAncestors", ctx, mock.AnythingOfType("int"), []int(nil)).Return(func(ctx context.Context, tagID int, excludeIDs []int) []*models.TagPath { return tc.onFindAllAncestors - }, func(tagID int, excludeIDs []int) error { + }, func(ctx context.Context, tagID int, excludeIDs []int) error { if tc.onFindAllAncestors != nil { return nil } return fmt.Errorf("undefined ancestors for: %d", tagID) }).Maybe() - mockTagReader.On("FindAllDescendants", mock.AnythingOfType("int"), []int(nil)).Return(func(tagID int, excludeIDs []int) []*models.TagPath { + mockTagReader.On("FindAllDescendants", ctx, mock.AnythingOfType("int"), []int(nil)).Return(func(ctx context.Context, tagID int, excludeIDs []int) []*models.TagPath { return tc.onFindAllDescendants - }, func(tagID int, excludeIDs []int) error { + }, func(ctx context.Context, tagID int, excludeIDs []int) error { if tc.onFindAllDescendants != nil { return nil } return fmt.Errorf("undefined descendants for: %d", tagID) }).Maybe() - res := ValidateHierarchy(testUniqueHierarchyTags[tc.id], parentIDs, childIDs, mockTagReader) + res := ValidateHierarchy(ctx, testUniqueHierarchyTags[tc.id], parentIDs, childIDs, mockTagReader) assert := assert.New(t) diff --git a/pkg/txn/transaction.go b/pkg/txn/transaction.go new file mode 100644 index 000000000..117e44eac --- /dev/null +++ b/pkg/txn/transaction.go @@ -0,0 +1,96 @@ +package txn + +import ( + "context" + "fmt" +) + +type Manager interface { + Begin(ctx context.Context) (context.Context, error) + Commit(ctx context.Context) error + Rollback(ctx context.Context) error + + IsLocked(err error) bool + + AddPostCommitHook(ctx context.Context, hook TxnFunc) + AddPostRollbackHook(ctx context.Context, hook TxnFunc) +} + +type DatabaseProvider interface { + WithDatabase(ctx context.Context) (context.Context, error) +} + +type TxnFunc func(ctx context.Context) error + +// WithTxn executes fn in a transaction. 
If fn returns an error then +// the transaction is rolled back. Otherwise it is committed. +func WithTxn(ctx context.Context, m Manager, fn TxnFunc) (err error) { + + ctx, err = m.Begin(ctx) + if err != nil { + return err + } + + defer func() { + if p := recover(); p != nil { + // a panic occurred, rollback and repanic + _ = m.Rollback(ctx) + panic(p) + } + + if err != nil { + // something went wrong, rollback + _ = m.Rollback(ctx) + } else { + // all good, commit; err is a named return value so a + // commit failure is propagated to the caller + err = m.Commit(ctx) + } + }() + + err = fn(ctx) + return err +} + +// WithDatabase executes fn with the context provided by p.WithDatabase. +// It does not run inside a transaction, so all database operations will be +// executed in their own transaction. +func WithDatabase(ctx context.Context, p DatabaseProvider, fn TxnFunc) error { + var err error + ctx, err = p.WithDatabase(ctx) + if err != nil { + return err + } + + return fn(ctx) +} + +type Retryer struct { + Manager Manager + // use value < 0 to retry forever + Retries int + OnFail func(ctx context.Context, err error, attempt int) error +} + +func (r Retryer) WithTxn(ctx context.Context, fn TxnFunc) error { + var attempt int + var err error + for attempt = 1; attempt <= r.Retries || r.Retries < 0; attempt++ { + err = WithTxn(ctx, r.Manager, fn) + + if err == nil { + return nil + } + + if !r.Manager.IsLocked(err) { + return err + } + + if r.OnFail != nil { + if err := r.OnFail(ctx, err, attempt); err != nil { + return err + } + } + } + + return fmt.Errorf("failed after %d attempts: %w", attempt-1, err) +} diff --git a/pkg/utils/phash.go b/pkg/utils/phash.go index 59d9e0016..7b15ec5e0 100644 --- a/pkg/utils/phash.go +++ b/pkg/utils/phash.go @@ -4,6 +4,7 @@ import ( "strconv" "github.com/corona10/goimagehash" + "github.com/stashapp/stash/pkg/sliceutil/intslice" ) type Phash struct { @@ -17,7 +18,7 @@ func FindDuplicates(hashes []*Phash, distance int) [][]int { for i, scene := range hashes { sceneHash := goimagehash.NewImageHash(uint64(scene.Hash), goimagehash.PHash) for j, neighbor := range hashes { - if i != j { + if i != j && scene.SceneID != neighbor.SceneID { neighborHash := goimagehash.NewImageHash(uint64(neighbor.Hash), goimagehash.PHash) neighborDistance, _ := sceneHash.Distance(neighborHash) if neighborDistance <= distance { @@ -34,7 +35,10 @@ func FindDuplicates(hashes []*Phash, distance int) [][]int { scenes := []int{scene.SceneID} scene.Bucket = bucket findNeighbors(bucket, scene.Neighbors, hashes, &scenes) - buckets = append(buckets, scenes) + + if len(scenes) > 1 { + buckets = append(buckets, scenes) + } } } @@ -46,7 +50,7 @@ func findNeighbors(bucket int, neighbors []int, hashes []*Phash, scenes *[]int) hash := hashes[id] if hash.Bucket == -1 { hash.Bucket = bucket - *scenes = append(*scenes, hash.SceneID) + *scenes = intslice.IntAppendUnique(*scenes, hash.SceneID) findNeighbors(bucket, hash.Neighbors, hashes, scenes) } } diff --git a/pkg/utils/strings_test.go b/pkg/utils/strings_test.go new file mode 100644 index 000000000..92af02fdc --- /dev/null +++ b/pkg/utils/strings_test.go @@ -0,0 +1,12 @@ +package utils + +import "fmt" + +func ExampleStrFormat() { + fmt.Println(StrFormat("{foo} bar {baz}", StrFormatMap{ + "foo": "bar", + "baz": "abc", + })) + // Output: + // bar bar abc +} diff --git a/scripts/test_db_generator/makeTestDB.go b/scripts/test_db_generator/makeTestDB.go index db095845b..075c809ee 100644 --- a/scripts/test_db_generator/makeTestDB.go +++ b/scripts/test_db_generator/makeTestDB.go @@ -1,5 +1,5 @@ -//go:build ignore -// +build ignore
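The new pkg/txn package above carries the transaction semantics for the rest of the refactor: WithTxn wraps a function in a single transaction that commits on success and rolls back on error or panic, and Retryer re-runs that whole function while the Manager reports the database as locked (SQLite's busy errors). A sketch of an intended call site, assuming some txn.Manager implementation is available; createScene and createSceneWithRetry are illustrative names, not code from this patch:

```go
package example

import (
	"context"
	"log"

	"github.com/stashapp/stash/pkg/txn"
)

// createScene is hypothetical: it stands in for repository work that
// runs against the transaction carried inside ctx.
func createScene(ctx context.Context) error { return nil }

func createSceneWithRetry(ctx context.Context, mgr txn.Manager) error {
	r := txn.Retryer{
		Manager: mgr,
		Retries: 5, // per the field comment above, a value < 0 retries forever
		OnFail: func(ctx context.Context, err error, attempt int) error {
			log.Printf("attempt %d hit a locked database: %v", attempt, err)
			return nil // a non-nil return here aborts the retry loop early
		},
	}

	// Each attempt is one full transaction: an error from the function
	// rolls back, a nil return commits, and only an error for which
	// mgr.IsLocked(err) is true triggers another attempt.
	return r.WithTxn(ctx, func(ctx context.Context) error {
		return createScene(ctx)
	})
}
```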
+//go:build tools +// +build tools package main @@ -11,15 +11,17 @@ import ( "math" "math/rand" "os" + "path" "strconv" "time" - "github.com/stashapp/stash/pkg/database" + "github.com/stashapp/stash/pkg/file" + "github.com/stashapp/stash/pkg/fsutil" "github.com/stashapp/stash/pkg/hash/md5" - "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/sliceutil/intslice" "github.com/stashapp/stash/pkg/sqlite" + "github.com/stashapp/stash/pkg/txn" "gopkg.in/yaml.v2" ) @@ -40,8 +42,12 @@ type config struct { Naming namingConfig `yaml:"naming"` } -var txnManager models.TransactionManager -var c *config +var ( + repo models.Repository + c *config + db *sqlite.Database + folderID file.FolderID +) func main() { rand.Seed(time.Now().UnixNano()) @@ -54,9 +60,14 @@ func main() { initNaming(*c) - if err = database.Initialize(c.Database); err != nil { + db = sqlite.NewDatabase() + repo = db.TxnRepository() + + logf("Initializing database...") + if err = db.Open(c.Database); err != nil { log.Fatalf("couldn't initialize database: %v", err) } + logf("Populating database...") populateDB() } @@ -89,12 +100,8 @@ func populateDB() { makeMarkers(c.Markers) } -func withTxn(f func(r models.Repository) error) error { - if txnManager == nil { - txnManager = sqlite.NewTransactionManager() - } - - return txnManager.WithTxn(context.TODO(), f) +func withTxn(f func(ctx context.Context) error) error { + return txn.WithTxn(context.Background(), db, f) } func retry(attempts int, fn func() error) error { @@ -109,28 +116,64 @@ func retry(attempts int, fn func() error) error { return err } +func getOrCreateFolder(ctx context.Context, p string) (*file.Folder, error) { + ret, err := repo.Folder.FindByPath(ctx, p) + if err != nil { + return nil, err + } + + if ret != nil { + return ret, nil + } + + var parentID *file.FolderID + + if p != "." 
{ + parent := path.Dir(p) + parentFolder, err := getOrCreateFolder(ctx, parent) + if err != nil { + return nil, err + } + + parentID = &parentFolder.ID + } + + f := file.Folder{ + Path: p, + ParentFolderID: parentID, + } + + if err := repo.Folder.Create(ctx, &f); err != nil { + return nil, err + } + + ret = &f + return ret, nil +} + func makeTags(n int) { + logf("creating %d tags...", n) for i := 0; i < n; i++ { if err := retry(100, func() error { - return withTxn(func(r models.Repository) error { + return withTxn(func(ctx context.Context) error { name := names[c.Naming.Tags].generateName(1) tag := models.Tag{ Name: name, } - created, err := r.Tag().Create(tag) + created, err := repo.Tag.Create(ctx, tag) if err != nil { return err } if rand.Intn(100) > 5 { - t, _, err := r.Tag().Query(nil, getRandomFilter(1)) + t, _, err := repo.Tag.Query(ctx, nil, getRandomFilter(1)) if err != nil { return err } if len(t) > 0 && t[0].ID != created.ID { - if err := r.Tag().UpdateParentTags(created.ID, []int{t[0].ID}); err != nil { + if err := repo.Tag.UpdateParentTags(ctx, created.ID, []int{t[0].ID}); err != nil { return err } } @@ -145,9 +188,10 @@ func makeTags(n int) { } func makeStudios(n int) { + logf("creating %d studios...", n) for i := 0; i < n; i++ { if err := retry(100, func() error { - return withTxn(func(r models.Repository) error { + return withTxn(func(ctx context.Context) error { name := names[c.Naming.Tags].generateName(rand.Intn(5) + 1) studio := models.Studio{ Name: sql.NullString{String: name, Valid: true}, @@ -155,7 +199,7 @@ func makeStudios(n int) { } if rand.Intn(100) > 5 { - ss, _, err := r.Studio().Query(nil, getRandomFilter(1)) + ss, _, err := repo.Studio.Query(ctx, nil, getRandomFilter(1)) if err != nil { return err } @@ -168,7 +212,7 @@ func makeStudios(n int) { } } - _, err := r.Studio().Create(studio) + _, err := repo.Studio.Create(ctx, studio) return err }) }); err != nil { @@ -178,9 +222,10 @@ func makeStudios(n int) { } func makePerformers(n int) { + logf("creating %d performers...", n) for i := 0; i < n; i++ { if err := retry(100, func() error { - return withTxn(func(r models.Repository) error { + return withTxn(func(ctx context.Context) error { name := generatePerformerName() performer := models.Performer{ Name: sql.NullString{String: name, Valid: true}, @@ -193,7 +238,7 @@ func makePerformers(n int) { // TODO - set tags - _, err := r.Performer().Create(performer) + _, err := repo.Performer.Create(ctx, performer) if err != nil { err = fmt.Errorf("error creating performer with name: %s: %s", performer.Name.String, err.Error()) } @@ -205,23 +250,77 @@ func makePerformers(n int) { } } +func generateBaseFile(parentFolderID file.FolderID, path string) *file.BaseFile { + return &file.BaseFile{ + Basename: path, + ParentFolderID: parentFolderID, + Fingerprints: []file.Fingerprint{ + file.Fingerprint{ + Type: "md5", + Fingerprint: md5.FromString(path), + }, + file.Fingerprint{ + Type: "oshash", + Fingerprint: md5.FromString(path), + }, + }, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + } +} + +func generateVideoFile(parentFolderID file.FolderID, path string) file.File { + w, h := getResolution() + + return &file.VideoFile{ + BaseFile: generateBaseFile(parentFolderID, path), + Duration: rand.Float64() * 14400, + Height: h, + Width: w, + } +} + +func makeVideoFile(ctx context.Context, path string) (file.File, error) { + folderPath := fsutil.GetIntraDir(path, 2, 2) + parentFolder, err := getOrCreateFolder(ctx, folderPath) + if err != nil { + return nil, err + } + + f := 
generateVideoFile(parentFolder.ID, path) + + if err := repo.File.Create(ctx, f); err != nil { + return nil, err + } + + return f, nil +} + +func logf(f string, args ...interface{}) { + log.Printf(f+"\n", args...) +} + func makeScenes(n int) { - logger.Infof("creating %d scenes...", n) + logf("creating %d scenes...", n) for i := 0; i < n; { // do in batches of 1000 batch := i + batchSize - if err := withTxn(func(r models.Repository) error { + if err := withTxn(func(ctx context.Context) error { for ; i < batch && i < n; i++ { scene := generateScene(i) - scene.StudioID = getRandomStudioID(r) + scene.StudioID = getRandomStudioID(ctx) + makeSceneRelationships(ctx, &scene) - created, err := r.Scene().Create(scene) + path := md5.FromString("scene/" + strconv.Itoa(i)) + f, err := makeVideoFile(ctx, path) if err != nil { return err } - makeSceneRelationships(r, created.ID) + if err := repo.Scene.Create(ctx, &scene, []file.ID{f.Base().ID}); err != nil { + return err + } } return nil @@ -229,14 +328,14 @@ func makeScenes(n int) { panic(err) } - logger.Infof("... created %d scenes", i) + logf("... created %d scenes", i) } } -func getResolution() (int64, int64) { +func getResolution() (int, int) { res := models.AllResolutionEnum[rand.Intn(len(models.AllResolutionEnum))] - h := int64(res.GetMaxResolution()) - var w int64 + h := res.GetMaxResolution() + var w int if h == 240 || h == 480 || rand.Intn(10) == 9 { w = h * 4 / 3 } else { @@ -250,54 +349,75 @@ func getResolution() (int64, int64) { return w, h } -func getDate() string { +func getDate() time.Time { s := rand.Int63n(time.Now().Unix()) - d := time.Unix(s, 0) - return d.Format("2006-01-02") + return time.Unix(s, 0) } func generateScene(i int) models.Scene { - path := md5.FromString("scene/" + strconv.Itoa(i)) - w, h := getResolution() - return models.Scene{ - Path: path, - Title: sql.NullString{String: names[c.Naming.Scenes].generateName(rand.Intn(7) + 1), Valid: true}, - Checksum: sql.NullString{String: md5.FromString(path), Valid: true}, - OSHash: sql.NullString{String: md5.FromString(path), Valid: true}, - Duration: sql.NullFloat64{ - Float64: rand.Float64() * 14400, - Valid: true, - }, - Height: models.NullInt64(h), - Width: models.NullInt64(w), - Date: models.SQLiteDate{ - String: getDate(), - Valid: true, + Title: names[c.Naming.Scenes].generateName(rand.Intn(7) + 1), + Date: &models.Date{ + Time: getDate(), }, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), } } +func generateImageFile(parentFolderID file.FolderID, path string) file.File { + w, h := getResolution() + + return &file.ImageFile{ + BaseFile: generateBaseFile(parentFolderID, path), + Height: h, + Width: w, + } +} + +func makeImageFile(ctx context.Context, path string) (file.File, error) { + folderPath := fsutil.GetIntraDir(path, 2, 2) + parentFolder, err := getOrCreateFolder(ctx, folderPath) + if err != nil { + return nil, err + } + + f := generateImageFile(parentFolder.ID, path) + + if err := repo.File.Create(ctx, f); err != nil { + return nil, err + } + + return f, nil +} + func makeImages(n int) { - logger.Infof("creating %d images...", n) + logf("creating %d images...", n) for i := 0; i < n; { // do in batches of 1000 batch := i + batchSize - if err := withTxn(func(r models.Repository) error { + if err := withTxn(func(ctx context.Context) error { for ; i < batch && i < n; i++ { image := generateImage(i) - image.StudioID = getRandomStudioID(r) + image.StudioID = getRandomStudioID(ctx) + makeImageRelationships(ctx, &image) - created, err := r.Image().Create(image) + path := 
md5.FromString("image/" + strconv.Itoa(i)) + f, err := makeImageFile(ctx, path) if err != nil { return err } - makeImageRelationships(r, created.ID) + if err := repo.Image.Create(ctx, &models.ImageCreateInput{ + Image: &image, + FileIDs: []file.ID{f.Base().ID}, + }); err != nil { + return err + } } - logger.Infof("... created %d images", i) + logf("... created %d images", i) return nil }); err != nil { @@ -307,36 +427,37 @@ func makeImages(n int) { } func generateImage(i int) models.Image { - path := md5.FromString("image/" + strconv.Itoa(i)) - - w, h := getResolution() - return models.Image{ - Title: sql.NullString{String: names[c.Naming.Images].generateName(rand.Intn(7) + 1), Valid: true}, - Path: path, - Checksum: md5.FromString(path), - Height: models.NullInt64(h), - Width: models.NullInt64(w), + Title: names[c.Naming.Images].generateName(rand.Intn(7) + 1), + CreatedAt: time.Now(), + UpdatedAt: time.Now(), } } func makeGalleries(n int) { - logger.Infof("creating %d galleries...", n) + logf("creating %d galleries...", n) for i := 0; i < n; { // do in batches of 1000 batch := i + batchSize - if err := withTxn(func(r models.Repository) error { + if err := withTxn(func(ctx context.Context) error { for ; i < batch && i < n; i++ { gallery := generateGallery(i) - gallery.StudioID = getRandomStudioID(r) + gallery.StudioID = getRandomStudioID(ctx) + gallery.TagIDs = getRandomTags(ctx, 0, 15) + gallery.PerformerIDs = getRandomPerformers(ctx) - created, err := r.Gallery().Create(gallery) + path := md5.FromString("gallery/" + strconv.Itoa(i)) + f, err := makeZipFile(ctx, path) if err != nil { return err } - makeGalleryRelationships(r, created.ID) + if err := repo.Gallery.Create(ctx, &gallery, []file.ID{f.Base().ID}); err != nil { + return err + } + + makeGalleryRelationships(ctx, &gallery) } return nil @@ -344,49 +465,66 @@ func makeGalleries(n int) { panic(err) } - logger.Infof("... created %d galleries", i) + logf("... 
created %d galleries", i) } } -func generateGallery(i int) models.Gallery { - path := md5.FromString("gallery/" + strconv.Itoa(i)) +func generateZipFile(parentFolderID file.FolderID, path string) file.File { + return generateBaseFile(parentFolderID, path) +} +func makeZipFile(ctx context.Context, path string) (file.File, error) { + folderPath := fsutil.GetIntraDir(path, 2, 2) + parentFolder, err := getOrCreateFolder(ctx, folderPath) + if err != nil { + return nil, err + } + + f := generateZipFile(parentFolder.ID, path) + + if err := repo.File.Create(ctx, f); err != nil { + return nil, err + } + + return f, nil +} + +func generateGallery(i int) models.Gallery { return models.Gallery{ - Title: sql.NullString{String: names[c.Naming.Galleries].generateName(rand.Intn(7) + 1), Valid: true}, - Path: sql.NullString{String: path, Valid: true}, - Checksum: md5.FromString(path), - Date: models.SQLiteDate{ - String: getDate(), - Valid: true, + Title: names[c.Naming.Galleries].generateName(rand.Intn(7) + 1), + Date: &models.Date{ + Time: getDate(), }, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), } } func makeMarkers(n int) { - logger.Infof("creating %d markers...", n) + logf("creating %d markers...", n) for i := 0; i < n; { // do in batches of 1000 batch := i + batchSize - if err := withTxn(func(r models.Repository) error { + if err := withTxn(func(ctx context.Context) error { for ; i < batch && i < n; i++ { marker := generateMarker(i) marker.SceneID = models.NullInt64(int64(getRandomScene())) - marker.PrimaryTagID = getRandomTags(r, 1, 1)[0] + marker.PrimaryTagID = getRandomTags(ctx, 1, 1)[0] - created, err := r.SceneMarker().Create(marker) + created, err := repo.SceneMarker.Create(ctx, marker) if err != nil { return err } - tags := getRandomTags(r, 0, 5) + tags := getRandomTags(ctx, 0, 5) // remove primary tag tags = intslice.IntExclude(tags, []int{marker.PrimaryTagID}) - if err := r.SceneMarker().UpdateTags(created.ID, tags); err != nil { + if err := repo.SceneMarker.UpdateTags(ctx, created.ID, tags); err != nil { return err } } - logger.Infof("... created %d markers", i) + logf("... created %d markers", i) return nil }); err != nil { @@ -410,9 +548,9 @@ func getRandomFilter(n int) *models.FindFilterType { } } -func getRandomStudioID(r models.Repository) sql.NullInt64 { +func getRandomStudioID(ctx context.Context) *int { if rand.Intn(10) == 0 { - return sql.NullInt64{} + return nil } // s, _, err := r.Studio().Query(nil, getRandomFilter(1)) @@ -420,82 +558,44 @@ func getRandomStudioID(r models.Repository) sql.NullInt64 { // panic(err) // } - return sql.NullInt64{ - Int64: int64(rand.Int63n(int64(c.Studios)) + 1), - Valid: true, - } + v := rand.Intn(c.Studios) + 1 + return &v } -func makeSceneRelationships(r models.Repository, id int) { +func makeSceneRelationships(ctx context.Context, s *models.Scene) { // add tags - tagIDs := getRandomTags(r, 0, 15) - if len(tagIDs) > 0 { - if err := r.Scene().UpdateTags(id, tagIDs); err != nil { - panic(err) - } - } + s.TagIDs = getRandomTags(ctx, 0, 15) // add performers - performerIDs := getRandomPerformers(r) - if len(tagIDs) > 0 { - if err := r.Scene().UpdatePerformers(id, performerIDs); err != nil { - panic(err) - } - } + s.PerformerIDs = getRandomPerformers(ctx) } -func makeImageRelationships(r models.Repository, id int) { +func makeImageRelationships(ctx context.Context, i *models.Image) { // there are typically many more images. 
For performance reasons // only a small proportion should have tags/performers // add tags if rand.Intn(100) == 0 { - tagIDs := getRandomTags(r, 1, 15) - if len(tagIDs) > 0 { - if err := r.Image().UpdateTags(id, tagIDs); err != nil { - panic(err) - } - } + i.TagIDs = getRandomTags(ctx, 1, 15) } // add performers if rand.Intn(100) <= 1 { - performerIDs := getRandomPerformers(r) - if len(performerIDs) > 0 { - if err := r.Image().UpdatePerformers(id, performerIDs); err != nil { - panic(err) - } - } + i.PerformerIDs = getRandomPerformers(ctx) } } -func makeGalleryRelationships(r models.Repository, id int) { - // add tags - tagIDs := getRandomTags(r, 0, 15) - if len(tagIDs) > 0 { - if err := r.Gallery().UpdateTags(id, tagIDs); err != nil { - panic(err) - } - } - - // add performers - performerIDs := getRandomPerformers(r) - if len(tagIDs) > 0 { - if err := r.Gallery().UpdatePerformers(id, performerIDs); err != nil { - panic(err) - } - } - +func makeGalleryRelationships(ctx context.Context, g *models.Gallery) { // add images - imageIDs := getRandomImages(r) - if len(tagIDs) > 0 { - if err := r.Gallery().UpdateImages(id, imageIDs); err != nil { + imageIDs := getRandomImages(ctx) + if len(imageIDs) > 0 { + if err := repo.Gallery.UpdateImages(ctx, g.ID, imageIDs); err != nil { panic(err) } } } -func getRandomPerformers(r models.Repository) []int { +func getRandomPerformers(ctx context.Context) []int { n := rand.Intn(5) var ret []int @@ -521,7 +621,7 @@ func getRandomScene() int { return rand.Intn(c.Scenes) + 1 } -func getRandomTags(r models.Repository, min, max int) []int { +func getRandomTags(ctx context.Context, min, max int) []int { var n int if min == max { n = min @@ -548,7 +648,7 @@ func getRandomTags(r models.Repository, min, max int) []int { return ret } -func getRandomImages(r models.Repository) []int { +func getRandomImages(ctx context.Context) []int { n := rand.Intn(500) var ret []int diff --git a/scripts/test_db_generator/naming.go b/scripts/test_db_generator/naming.go index cc016dce5..8923a1500 100644 --- a/scripts/test_db_generator/naming.go +++ b/scripts/test_db_generator/naming.go @@ -1,4 +1,5 @@ -// +build ignore +//go:build tools +// +build tools package main diff --git a/tools.go b/tools.go index fa219bb53..adc47d7e9 100644 --- a/tools.go +++ b/tools.go @@ -6,5 +6,6 @@ package main import ( _ "github.com/99designs/gqlgen" _ "github.com/Yamashou/gqlgenc" + _ "github.com/vektah/dataloaden" _ "github.com/vektra/mockery/v2" ) diff --git a/ui/v2.5/src/App.tsx b/ui/v2.5/src/App.tsx index 31a6a400e..1ef2e306a 100755 --- a/ui/v2.5/src/App.tsx +++ b/ui/v2.5/src/App.tsx @@ -9,7 +9,11 @@ import LightboxProvider from "src/hooks/Lightbox/context"; import { initPolyfills } from "src/polyfills"; import locales from "src/locales"; -import { useConfiguration, useSystemStatus } from "src/core/StashService"; +import { + useConfiguration, + useConfigureUI, + useSystemStatus, +} from "src/core/StashService"; import { flattenMessages } from "src/utils"; import Mousetrap from "mousetrap"; import MousetrapPause from "mousetrap-pause"; @@ -22,6 +26,9 @@ import { LoadingIndicator, TITLE_SUFFIX } from "./components/Shared"; import { ConfigurationProvider } from "./hooks/Config"; import { ManualProvider } from "./components/Help/context"; import { InteractiveProvider } from "./hooks/Interactive/context"; +import { ReleaseNotesDialog } from "./components/Dialogs/ReleaseNotesDialog"; +import { IUIConfig } from "./core/config"; +import { releaseNotes } from "./docs/en/ReleaseNotes"; const Performers = lazy(() 
=> import("./components/Performers/Performers")); const FrontPage = lazy(() => import("./components/FrontPage/FrontPage")); @@ -62,6 +69,8 @@ function languageMessageString(language: string) { export const App: React.FC = () => { const config = useConfiguration(); + const [saveUI] = useConfigureUI(); + const { data: systemStatusData } = useSystemStatus(); const language = @@ -161,6 +170,36 @@ export const App: React.FC = () => { ); } + function maybeRenderReleaseNotes() { + if (setupMatch || config.loading || config.error) { + return; + } + + const lastNoteSeen = (config.data?.configuration.ui as IUIConfig) + ?.lastNoteSeen; + const notes = releaseNotes.filter((n) => { + return !lastNoteSeen || n.date > lastNoteSeen; + }); + + if (notes.length === 0) return; + + return ( + n.content)} + onClose={() => { + saveUI({ + variables: { + input: { + ...config.data?.configuration.ui, + lastNoteSeen: notes[0].date, + }, + }, + }); + }} + /> + ); + } + return ( {messages ? ( @@ -173,6 +212,7 @@ export const App: React.FC = () => { configuration={config.data?.configuration} loading={config.loading} > + {maybeRenderReleaseNotes()} }> diff --git a/ui/v2.5/src/components/Changelog/Changelog.tsx b/ui/v2.5/src/components/Changelog/Changelog.tsx index da38f5c7c..24e5cee5f 100644 --- a/ui/v2.5/src/components/Changelog/Changelog.tsx +++ b/ui/v2.5/src/components/Changelog/Changelog.tsx @@ -1,26 +1,27 @@ import React from "react"; import { useChangelogStorage } from "src/hooks"; import Version from "./Version"; -import V010 from "./versions/v010.md"; -import V011 from "./versions/v011.md"; -import V020 from "./versions/v020.md"; -import V021 from "./versions/v021.md"; -import V030 from "./versions/v030.md"; -import V040 from "./versions/v040.md"; -import V050 from "./versions/v050.md"; -import V060 from "./versions/v060.md"; -import V070 from "./versions/v070.md"; -import V080 from "./versions/v080.md"; -import V090 from "./versions/v090.md"; -import V0100 from "./versions/v0100.md"; -import V0110 from "./versions/v0110.md"; -import V0120 from "./versions/v0120.md"; -import V0130 from "./versions/v0130.md"; -import V0131 from "./versions/v0131.md"; -import V0140 from "./versions/v0140.md"; -import V0150 from "./versions/v0150.md"; -import V0160 from "./versions/v0160.md"; -import V0161 from "./versions/v0161.md"; +import V010 from "src/docs/en/Changelog/v010.md"; +import V011 from "src/docs/en/Changelog/v011.md"; +import V020 from "src/docs/en/Changelog/v020.md"; +import V021 from "src/docs/en/Changelog/v021.md"; +import V030 from "src/docs/en/Changelog/v030.md"; +import V040 from "src/docs/en/Changelog/v040.md"; +import V050 from "src/docs/en/Changelog/v050.md"; +import V060 from "src/docs/en/Changelog/v060.md"; +import V070 from "src/docs/en/Changelog/v070.md"; +import V080 from "src/docs/en/Changelog/v080.md"; +import V090 from "src/docs/en/Changelog/v090.md"; +import V0100 from "src/docs/en/Changelog/v0100.md"; +import V0110 from "src/docs/en/Changelog/v0110.md"; +import V0120 from "src/docs/en/Changelog/v0120.md"; +import V0130 from "src/docs/en/Changelog/v0130.md"; +import V0131 from "src/docs/en/Changelog/v0131.md"; +import V0140 from "src/docs/en/Changelog/v0140.md"; +import V0150 from "src/docs/en/Changelog/v0150.md"; +import V0160 from "src/docs/en/Changelog/v0160.md"; +import V0161 from "src/docs/en/Changelog/v0161.md"; +import V0170 from "src/docs/en/Changelog/v0170.md"; import { MarkdownPage } from "../Shared/MarkdownPage"; // to avoid use of explicit any @@ -59,9 +60,9 @@ const Changelog: React.FC 
   // after new release:
   // add entry to releases, using the current* fields
   // then update the current fields.
-  const currentVersion = stashVersion || "v0.16.1";
+  const currentVersion = stashVersion || "v0.17.0";
   const currentDate = buildDate;
-  const currentPage = V0161;
+  const currentPage = V0170;

   const releases: IStashRelease[] = [
     {
@@ -70,6 +71,11 @@ const Changelog: React.FC = () => {
       page: currentPage,
       defaultOpen: true,
     },
+    {
+      version: "v0.16.1",
+      date: "2022-07-26",
+      page: V0161,
+    },
     {
       version: "v0.16.0",
       date: "2022-07-05",
@@ -168,7 +174,7 @@ const Changelog: React.FC = () => {
   ];

   return (
-    <>
+    <div className="changelog">
       <h1>Changelog:</h1>

       {releases.map((r) => (
         <Version
           key={r.version}
           version={r.version}
           date={r.date}
           page={r.page}
           defaultOpen={r.defaultOpen}
         />
       ))}
-    </>
+    </div>
   );
 };
diff --git a/ui/v2.5/src/components/Changelog/styles.scss b/ui/v2.5/src/components/Changelog/styles.scss
index abdf45545..07c88f698 100644
--- a/ui/v2.5/src/components/Changelog/styles.scss
+++ b/ui/v2.5/src/components/Changelog/styles.scss
@@ -1,6 +1,5 @@
 .changelog {
   margin-bottom: 4rem;
-  margin-top: 4rem;

   .btn {
     color: inherit;
diff --git a/ui/v2.5/src/components/Dialogs/ReleaseNotesDialog.tsx b/ui/v2.5/src/components/Dialogs/ReleaseNotesDialog.tsx
new file mode 100644
index 000000000..fefc344be
--- /dev/null
+++ b/ui/v2.5/src/components/Dialogs/ReleaseNotesDialog.tsx
@@ -0,0 +1,39 @@
+import React from "react";
+import { Form } from "react-bootstrap";
+import { Modal } from "src/components/Shared";
+import { faCogs } from "@fortawesome/free-solid-svg-icons";
+import { useIntl } from "react-intl";
+import { MarkdownPage } from "../Shared/MarkdownPage";
+import { Module } from "src/docs/en/ReleaseNotes";
+
+interface IReleaseNotesDialog {
+  notes: Module[];
+  onClose: () => void;
+}
+
+export const ReleaseNotesDialog: React.FC<IReleaseNotesDialog> = ({
+  notes,
+  onClose,
+}) => {
+  const intl = useIntl();
+
+  return (
+
+      {notes.map((n, i) => (
+        <MarkdownPage page={n} key={i} />
+      ))}
+
+  );
+};
+
+export default ReleaseNotesDialog;
diff --git a/ui/v2.5/src/components/FrontPage/FrontPage.tsx b/ui/v2.5/src/components/FrontPage/FrontPage.tsx
index cddebca9d..d68e2ca00 100644
--- a/ui/v2.5/src/components/FrontPage/FrontPage.tsx
+++ b/ui/v2.5/src/components/FrontPage/FrontPage.tsx
@@ -36,6 +36,7 @@ const FrontPage: React.FC = () => {
       await saveUI({
         variables: {
           input: {
+            ...configuration?.ui,
             frontPageContent: content,
           },
         },
diff --git a/ui/v2.5/src/components/Galleries/DeleteGalleriesDialog.tsx b/ui/v2.5/src/components/Galleries/DeleteGalleriesDialog.tsx
index d4122be78..4e128dd28 100644
--- a/ui/v2.5/src/components/Galleries/DeleteGalleriesDialog.tsx
+++ b/ui/v2.5/src/components/Galleries/DeleteGalleriesDialog.tsx
@@ -73,8 +73,14 @@ export const DeleteGalleriesDialog: React.FC = (
     return;
   }

-  const fsGalleries = props.selected.filter((g) => g.path);
-  if (fsGalleries.length === 0) {
+  const deletedFiles: string[] = [];
+
+  props.selected.forEach((s) => {
+    const paths = s.files.map((f) => f.path);
+    deletedFiles.push(...paths);
+  });
+
+  if (deletedFiles.length === 0) {
     return;
   }

@@ -83,7 +89,7 @@ export const DeleteGalleriesDialog: React.FC = (

   />

-          {fsGalleries.slice(0, 5).map((s) => (
-            <li>{s.path}</li>
+          {deletedFiles.slice(0, 5).map((s) => (
+            <li>{s}</li>
           ))}
-          {fsGalleries.length > 5 && (
+          {deletedFiles.length > 5 && (
= (props) => {
diff --git a/ui/v2.5/src/components/Galleries/GalleryDetails/Gallery.tsx b/ui/v2.5/src/components/Galleries/GalleryDetails/Gallery.tsx
index 1ef62af37..1cf4f6c46 100644
--- a/ui/v2.5/src/components/Galleries/GalleryDetails/Gallery.tsx
+++ b/ui/v2.5/src/components/Galleries/GalleryDetails/Gallery.tsx
@@ -1,5 +1,5 @@
 import { Tab, Nav, Dropdown } from "react-bootstrap";
-import React, { useEffect, useState } from "react";
+import React, { useEffect, useMemo, useState } from "react";
 import { useParams, useHistory, Link } from "react-router-dom";
 import { FormattedMessage, useIntl } from "react-intl";
 import { Helmet } from "react-helmet";
@@ -10,7 +10,6 @@ import {
   useGalleryUpdate,
 } from "src/core/StashService";
 import { ErrorMessage, LoadingIndicator, Icon } from "src/components/Shared";
-import { TextUtils } from "src/utils";
 import Mousetrap from "mousetrap";
 import { useToast } from "src/hooks";
 import { OrganizedButton } from "src/components/Scenes/SceneDetails/OrganizedButton";
@@ -22,6 +21,7 @@ import { GalleryAddPanel } from "./GalleryAddPanel";
 import { GalleryFileInfoPanel } from "./GalleryFileInfoPanel";
 import { GalleryScenesPanel } from "./GalleryScenesPanel";
 import { faEllipsisV } from "@fortawesome/free-solid-svg-icons";
+import { galleryPath, galleryTitle } from "src/core/galleries";

 interface IProps {
   gallery: GQL.GalleryDataFragment;
@@ -46,6 +46,8 @@ export const GalleryPage: React.FC = ({ gallery }) => {
     }
   };

+  const path = useMemo(() => galleryPath(gallery), [gallery]);
+
   const [updateGallery] = useGalleryUpdate();

   const [organizedLoading, setOrganizedLoading] = useState(false);
@@ -69,12 +71,12 @@ export const GalleryPage: React.FC = ({ gallery }) => {
   };

   async function onRescan() {
-    if (!gallery || !gallery.path) {
+    if (!gallery || !path) {
       return;
     }

     await mutateMetadataScan({
-      paths: [gallery.path],
+      paths: [path],
     });

     Toast.success({
@@ -120,7 +122,7 @@ export const GalleryPage: React.FC = ({ gallery }) => {
-          {gallery.path ? (
+          {path ? (
= ({ gallery }) => {
             )}
-          {gallery.path ? (
+          {path ? (
@@ -270,12 +272,12 @@ export const GalleryPage: React.FC = ({ gallery }) => {
     };
   });

+  const title = galleryTitle(gallery);
+
   return (
    - - {gallery.title ?? TextUtils.fileNameFromPath(gallery.path ?? "")} - + {title} {maybeRenderDeleteDialog()}
    @@ -291,9 +293,7 @@ export const GalleryPage: React.FC = ({ gallery }) => { )} -

    - {gallery.title ?? TextUtils.fileNameFromPath(gallery.path ?? "")} -

    +

    {title}

    {renderTabs()}
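
The Gallery.tsx hunks above replace direct gallery.path access and the "gallery.title ?? TextUtils.fileNameFromPath(...)" fallback with galleryPath(gallery) and galleryTitle(gallery), since a gallery is now backed by a files list plus an optional folder rather than a single path. A minimal sketch of what such helpers could look like; the field shapes and fallback order here are assumptions for illustration, not the actual code in src/core/galleries:

    interface GalleryLike {
      title?: string | null;
      folder?: { path: string } | null;
      files: { path: string }[];
    }

    // stand-in for TextUtils.fileNameFromPath: "a/b/c.zip" -> "c.zip"
    function basename(p: string): string {
      return p.replace(/\\/g, "/").split("/").pop() ?? p;
    }

    // a gallery is backed by either a folder or one or more archive files
    export function galleryPath(g: GalleryLike): string | undefined {
      return g.folder ? g.folder.path : g.files[0]?.path;
    }

    export function galleryTitle(g: GalleryLike): string {
      if (g.title) return g.title;
      const p = galleryPath(g);
      return p ? basename(p) : "";
    }
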
    diff --git a/ui/v2.5/src/components/Galleries/GalleryDetails/GalleryAddPanel.tsx b/ui/v2.5/src/components/Galleries/GalleryDetails/GalleryAddPanel.tsx index 15343f7e4..04a9db305 100644 --- a/ui/v2.5/src/components/Galleries/GalleryDetails/GalleryAddPanel.tsx +++ b/ui/v2.5/src/components/Galleries/GalleryDetails/GalleryAddPanel.tsx @@ -6,9 +6,9 @@ import { ImageList } from "src/components/Images/ImageList"; import { showWhenSelected } from "src/hooks/ListHook"; import { mutateAddGalleryImages } from "src/core/StashService"; import { useToast } from "src/hooks"; -import { TextUtils } from "src/utils"; import { useIntl } from "react-intl"; import { faPlus } from "@fortawesome/free-solid-svg-icons"; +import { galleryTitle } from "src/core/galleries"; interface IGalleryAddProps { gallery: GQL.GalleryDataFragment; @@ -21,7 +21,7 @@ export const GalleryAddPanel: React.FC = ({ gallery }) => { function filterHook(filter: ListFilterModel) { const galleryValue = { id: gallery.id, - label: gallery.title ?? TextUtils.fileNameFromPath(gallery.path ?? ""), + label: galleryTitle(gallery), }; // if galleries is already present, then we modify it, otherwise add let galleryCriterion = filter.criteria.find((c) => { diff --git a/ui/v2.5/src/components/Galleries/GalleryDetails/GalleryDetailPanel.tsx b/ui/v2.5/src/components/Galleries/GalleryDetails/GalleryDetailPanel.tsx index 46f00b4e1..049490b35 100644 --- a/ui/v2.5/src/components/Galleries/GalleryDetails/GalleryDetailPanel.tsx +++ b/ui/v2.5/src/components/Galleries/GalleryDetails/GalleryDetailPanel.tsx @@ -7,6 +7,7 @@ import { TagLink, TruncatedText } from "src/components/Shared"; import { PerformerCard } from "src/components/Performers/PerformerCard"; import { RatingStars } from "src/components/Scenes/SceneDetails/RatingStars"; import { sortPerformers } from "src/core/performers"; +import { galleryTitle } from "src/core/galleries"; interface IGalleryDetailProps { gallery: GQL.GalleryDataFragment; @@ -75,7 +76,7 @@ export const GalleryDetailPanel: React.FC = ({ // filename should use entire row if there is no studio const galleryDetailsWidth = gallery.studio ? "col-9" : "col-12"; - const title = gallery.title ?? TextUtils.fileNameFromPath(gallery.path ?? ""); + const title = galleryTitle(gallery); return ( <> diff --git a/ui/v2.5/src/components/Galleries/GalleryDetails/GalleryEditPanel.tsx b/ui/v2.5/src/components/Galleries/GalleryDetails/GalleryEditPanel.tsx index e6711da47..abcc6365d 100644 --- a/ui/v2.5/src/components/Galleries/GalleryDetails/GalleryEditPanel.tsx +++ b/ui/v2.5/src/components/Galleries/GalleryDetails/GalleryEditPanel.tsx @@ -31,10 +31,11 @@ import { } from "src/components/Shared"; import { useToast } from "src/hooks"; import { useFormik } from "formik"; -import { FormUtils, TextUtils } from "src/utils"; +import { FormUtils } from "src/utils"; import { RatingStars } from "src/components/Scenes/SceneDetails/RatingStars"; import { GalleryScrapeDialog } from "./GalleryScrapeDialog"; import { faSyncAlt } from "@fortawesome/free-solid-svg-icons"; +import { galleryTitle } from "src/core/galleries"; interface IProps { isVisible: boolean; @@ -60,7 +61,7 @@ export const GalleryEditPanel: React.FC< const [scenes, setScenes] = useState<{ id: string; title: string }[]>( (gallery?.scenes ?? []).map((s) => ({ id: s.id, - title: s.title ?? TextUtils.fileNameFromPath(s.path ?? 
""), + title: galleryTitle(s), })) ); diff --git a/ui/v2.5/src/components/Galleries/GalleryDetails/GalleryFileInfoPanel.tsx b/ui/v2.5/src/components/Galleries/GalleryDetails/GalleryFileInfoPanel.tsx index 66ffcb2dd..fd69d6903 100644 --- a/ui/v2.5/src/components/Galleries/GalleryDetails/GalleryFileInfoPanel.tsx +++ b/ui/v2.5/src/components/Galleries/GalleryDetails/GalleryFileInfoPanel.tsx @@ -1,7 +1,34 @@ -import React from "react"; +import React, { useMemo } from "react"; +import { Accordion, Card } from "react-bootstrap"; +import { TruncatedText } from "src/components/Shared"; import * as GQL from "src/core/generated-graphql"; +import { TextUtils } from "src/utils"; import { TextField, URLField } from "src/utils/field"; +interface IFileInfoPanelProps { + folder?: Pick; + file?: GQL.GalleryFileDataFragment; +} + +const FileInfoPanel: React.FC = ( + props: IFileInfoPanelProps +) => { + const checksum = props.file?.fingerprints.find((f) => f.type === "md5"); + const path = props.folder ? props.folder.path : props.file?.path ?? ""; + const id = props.folder ? "folder" : "path"; + + return ( +
    + + +
    + ); +}; interface IGalleryFileInfoPanelProps { gallery: GQL.GalleryDataFragment; } @@ -9,25 +36,49 @@ interface IGalleryFileInfoPanelProps { export const GalleryFileInfoPanel: React.FC = ( props: IGalleryFileInfoPanelProps ) => { + const filesPanel = useMemo(() => { + if (props.gallery.folder) { + return ; + } + + if (props.gallery.files.length === 0) { + return <>; + } + + if (props.gallery.files.length === 1) { + return ; + } + + return ( + + {props.gallery.files.map((file, index) => ( + + + + + + + + + + + ))} + + ); + }, [props.gallery]); + return ( -
    - - - -
    + <> +
    + +
    + + {filesPanel} + ); }; diff --git a/ui/v2.5/src/components/Galleries/GalleryDetails/GalleryImagesPanel.tsx b/ui/v2.5/src/components/Galleries/GalleryDetails/GalleryImagesPanel.tsx index c00d01722..bddd55da5 100644 --- a/ui/v2.5/src/components/Galleries/GalleryDetails/GalleryImagesPanel.tsx +++ b/ui/v2.5/src/components/Galleries/GalleryDetails/GalleryImagesPanel.tsx @@ -6,9 +6,9 @@ import { ImageList } from "src/components/Images/ImageList"; import { mutateRemoveGalleryImages } from "src/core/StashService"; import { showWhenSelected, PersistanceLevel } from "src/hooks/ListHook"; import { useToast } from "src/hooks"; -import { TextUtils } from "src/utils"; import { useIntl } from "react-intl"; import { faMinus } from "@fortawesome/free-solid-svg-icons"; +import { galleryTitle } from "src/core/galleries"; interface IGalleryDetailsProps { gallery: GQL.GalleryDataFragment; @@ -23,7 +23,7 @@ export const GalleryImagesPanel: React.FC = ({ function filterHook(filter: ListFilterModel) { const galleryValue = { id: gallery.id!, - label: gallery.title ?? TextUtils.fileNameFromPath(gallery.path ?? ""), + label: galleryTitle(gallery), }; // if galleries is already present, then we modify it, otherwise add let galleryCriterion = filter.criteria.find((c) => { diff --git a/ui/v2.5/src/components/Galleries/GalleryList.tsx b/ui/v2.5/src/components/Galleries/GalleryList.tsx index 83a8eef90..ab33cff3e 100644 --- a/ui/v2.5/src/components/Galleries/GalleryList.tsx +++ b/ui/v2.5/src/components/Galleries/GalleryList.tsx @@ -9,7 +9,6 @@ import { SlimGalleryDataFragment, } from "src/core/generated-graphql"; import { useGalleriesList } from "src/hooks"; -import { TextUtils } from "src/utils"; import { showWhenSelected, PersistanceLevel } from "src/hooks/ListHook"; import { ListFilterModel } from "src/models/list-filter/filter"; import { DisplayMode } from "src/models/list-filter/types"; @@ -19,6 +18,7 @@ import GalleryWallCard from "./GalleryWallCard"; import { EditGalleriesDialog } from "./EditGalleriesDialog"; import { DeleteGalleriesDialog } from "./DeleteGalleriesDialog"; import { ExportDialog } from "../Shared/ExportDialog"; +import { galleryTitle } from "src/core/galleries"; interface IGalleryList { filterHook?: (filter: ListFilterModel) => ListFilterModel; @@ -201,9 +201,7 @@ export const GalleryList: React.FC = ({ - {gallery.title ?? - TextUtils.fileNameFromPath(gallery.path ?? "")}{" "} - ({gallery.image_count}{" "} + {galleryTitle(gallery)} ({gallery.image_count}{" "} {gallery.image_count === 1 ? "image" : "images"}) diff --git a/ui/v2.5/src/components/Galleries/GalleryViewer.tsx b/ui/v2.5/src/components/Galleries/GalleryViewer.tsx index 7c69ede5b..3c3006310 100644 --- a/ui/v2.5/src/components/Galleries/GalleryViewer.tsx +++ b/ui/v2.5/src/components/Galleries/GalleryViewer.tsx @@ -19,7 +19,7 @@ export const GalleryViewer: React.FC = ({ galleryId }) => {
    showLightbox(index)} onKeyPress={() => showLightbox(index)} > diff --git a/ui/v2.5/src/components/Galleries/GalleryWallCard.tsx b/ui/v2.5/src/components/Galleries/GalleryWallCard.tsx index d2d02aba1..33f6a2dd5 100644 --- a/ui/v2.5/src/components/Galleries/GalleryWallCard.tsx +++ b/ui/v2.5/src/components/Galleries/GalleryWallCard.tsx @@ -5,6 +5,7 @@ import * as GQL from "src/core/generated-graphql"; import { RatingStars, TruncatedText } from "src/components/Shared"; import { TextUtils } from "src/utils"; import { useGalleryLightbox } from "src/hooks"; +import { galleryTitle } from "src/core/galleries"; const CLASSNAME = "GalleryWallCard"; const CLASSNAME_FOOTER = `${CLASSNAME}-footer`; @@ -19,12 +20,16 @@ const GalleryWallCard: React.FC = ({ gallery }) => { const intl = useIntl(); const showLightbox = useGalleryLightbox(gallery.id); + const coverFile = gallery?.cover?.files.length + ? gallery.cover.files[0] + : undefined; + const orientation = - (gallery?.cover?.file.width ?? 0) > (gallery.cover?.file.height ?? 0) + (coverFile?.width ?? 0) > (coverFile?.height ?? 0) ? "landscape" : "portrait"; const cover = gallery?.cover?.paths.thumbnail ?? ""; - const title = gallery.title ?? TextUtils.fileNameFromPath(gallery.path ?? ""); + const title = galleryTitle(gallery); const performerNames = gallery.performers.map((p) => p.name); const performers = performerNames.length >= 2 diff --git a/ui/v2.5/src/components/Galleries/styles.scss b/ui/v2.5/src/components/Galleries/styles.scss index e14fd5674..94d9cfa7b 100644 --- a/ui/v2.5/src/components/Galleries/styles.scss +++ b/ui/v2.5/src/components/Galleries/styles.scss @@ -218,3 +218,16 @@ $galleryTabWidth: 450px; } } } + +.gallery-file-card.card { + margin: 0; + padding: 0; + + .card-header { + cursor: pointer; + } + + dl { + margin-bottom: 0; + } +} diff --git a/ui/v2.5/src/components/Help/Manual.tsx b/ui/v2.5/src/components/Help/Manual.tsx index 83f787c2f..12faa3e26 100644 --- a/ui/v2.5/src/components/Help/Manual.tsx +++ b/ui/v2.5/src/components/Help/Manual.tsx @@ -1,27 +1,27 @@ import React, { useState, useEffect } from "react"; import { Modal, Container, Row, Col, Nav, Tab } from "react-bootstrap"; -import Introduction from "src/docs/en/Introduction.md"; -import Tasks from "src/docs/en/Tasks.md"; -import AutoTagging from "src/docs/en/AutoTagging.md"; -import JSONSpec from "src/docs/en/JSONSpec.md"; -import Configuration from "src/docs/en/Configuration.md"; -import Interface from "src/docs/en/Interface.md"; -import Galleries from "src/docs/en/Galleries.md"; -import Scraping from "src/docs/en/Scraping.md"; -import ScraperDevelopment from "src/docs/en/ScraperDevelopment.md"; -import Plugins from "src/docs/en/Plugins.md"; -import ExternalPlugins from "src/docs/en/ExternalPlugins.md"; -import EmbeddedPlugins from "src/docs/en/EmbeddedPlugins.md"; -import Tagger from "src/docs/en/Tagger.md"; -import Contributing from "src/docs/en/Contributing.md"; -import SceneFilenameParser from "src/docs/en/SceneFilenameParser.md"; -import KeyboardShortcuts from "src/docs/en/KeyboardShortcuts.md"; -import Help from "src/docs/en/Help.md"; -import Deduplication from "src/docs/en/Deduplication.md"; -import Interactive from "src/docs/en/Interactive.md"; -import Captions from "src/docs/en/Captions.md"; -import Identify from "src/docs/en/Identify.md"; -import Browsing from "src/docs/en/Browsing.md"; +import Introduction from "src/docs/en/Manual/Introduction.md"; +import Tasks from "src/docs/en/Manual/Tasks.md"; +import AutoTagging from "src/docs/en/Manual/AutoTagging.md"; 
+import JSONSpec from "src/docs/en/Manual/JSONSpec.md"; +import Configuration from "src/docs/en/Manual/Configuration.md"; +import Interface from "src/docs/en/Manual/Interface.md"; +import Galleries from "src/docs/en/Manual/Galleries.md"; +import Scraping from "src/docs/en/Manual/Scraping.md"; +import ScraperDevelopment from "src/docs/en/Manual/ScraperDevelopment.md"; +import Plugins from "src/docs/en/Manual/Plugins.md"; +import ExternalPlugins from "src/docs/en/Manual/ExternalPlugins.md"; +import EmbeddedPlugins from "src/docs/en/Manual/EmbeddedPlugins.md"; +import Tagger from "src/docs/en/Manual/Tagger.md"; +import Contributing from "src/docs/en/Manual/Contributing.md"; +import SceneFilenameParser from "src/docs/en/Manual/SceneFilenameParser.md"; +import KeyboardShortcuts from "src/docs/en/Manual/KeyboardShortcuts.md"; +import Help from "src/docs/en/Manual/Help.md"; +import Deduplication from "src/docs/en/Manual/Deduplication.md"; +import Interactive from "src/docs/en/Manual/Interactive.md"; +import Captions from "src/docs/en/Manual/Captions.md"; +import Identify from "src/docs/en/Manual/Identify.md"; +import Browsing from "src/docs/en/Manual/Browsing.md"; import { MarkdownPage } from "../Shared/MarkdownPage"; interface IManualProps { diff --git a/ui/v2.5/src/components/Images/DeleteImagesDialog.tsx b/ui/v2.5/src/components/Images/DeleteImagesDialog.tsx index 059be9f36..86f2ddbb8 100644 --- a/ui/v2.5/src/components/Images/DeleteImagesDialog.tsx +++ b/ui/v2.5/src/components/Images/DeleteImagesDialog.tsx @@ -73,12 +73,19 @@ export const DeleteImagesDialog: React.FC = ( return; } + const deletedFiles: string[] = []; + + props.selected.forEach((s) => { + const paths = s.files.map((f) => f.path); + deletedFiles.push(...paths); + }); + return (

    = ( />

-            {props.selected.slice(0, 5).map((s) => (
-              <li>{s.path}</li>
+            {deletedFiles.slice(0, 5).map((s) => (
+              <li>{s}</li>
             ))}
-            {props.selected.length > 5 && (
+            {deletedFiles.length > 5 && (
diff --git a/ui/v2.5/src/components/Images/ImageCard.tsx b/ui/v2.5/src/components/Images/ImageCard.tsx
--- a/ui/v2.5/src/components/Images/ImageCard.tsx
+++ b/ui/v2.5/src/components/Images/ImageCard.tsx
export const ImageCard: React.FC<IImageCardProps> = (
   props: IImageCardProps
 ) => {
+  const file = useMemo(
+    () => (props.image.files.length > 0 ? props.image.files[0] : undefined),
+    [props.image]
+  );
+
   function maybeRenderTagPopoverButton() {
     if (props.image.tags.length <= 0) return;

@@ -125,9 +130,8 @@
   }

   function isPortrait() {
-    const { file } = props.image;
-    const width = file.width ? file.width : 0;
-    const height = file.height ? file.height : 0;
+    const width = file?.width ? file.width : 0;
+    const height = file?.height ? file.height : 0;

     return height > width;
   }

@@ -135,11 +139,7 @@
diff --git a/ui/v2.5/src/components/Images/ImageDetails/Image.tsx b/ui/v2.5/src/components/Images/ImageDetails/Image.tsx
index 338e19e22..b394f3af6 100644
--- a/ui/v2.5/src/components/Images/ImageDetails/Image.tsx
+++ b/ui/v2.5/src/components/Images/ImageDetails/Image.tsx
@@ -13,7 +13,6 @@ import {
 } from "src/core/StashService";
 import { ErrorMessage, LoadingIndicator, Icon } from "src/components/Shared";
 import { useToast } from "src/hooks";
-import { TextUtils } from "src/utils";
 import * as Mousetrap from "mousetrap";
 import { OCounterButton } from "src/components/Scenes/SceneDetails/OCounterButton";
 import { OrganizedButton } from "src/components/Scenes/SceneDetails/OrganizedButton";
@@ -22,6 +21,7 @@ import { ImageEditPanel } from "./ImageEditPanel";
 import { ImageDetailPanel } from "./ImageDetailPanel";
 import { DeleteImagesDialog } from "../DeleteImagesDialog";
 import { faEllipsisV } from "@fortawesome/free-solid-svg-icons";
+import { objectPath, objectTitle } from "src/core/files";

 interface IImageParams {
   id?: string;
 }
@@ -48,12 +48,12 @@ export const Image: React.FC = () => {
   const [isDeleteAlertOpen, setIsDeleteAlertOpen] = useState(false);

   async function onRescan() {
-    if (!image) {
+    if (!image || !image.files.length) {
       return;
     }

     await mutateMetadataScan({
-      paths: [image.path],
+      paths: [objectPath(image)],
     });

     Toast.success({
@@ -251,10 +251,12 @@ export const Image: React.FC = () => {
     return ;
   }

+  const title = objectTitle(image);
+
   return (
      - {image.title ?? TextUtils.fileNameFromPath(image.path)} + {title} {maybeRenderDeleteDialog()} @@ -271,16 +273,14 @@ export const Image: React.FC = () => { )} -

      - {image.title ?? TextUtils.fileNameFromPath(image.path)} -

      +

      {title}

      {renderTabs()}
    {image.title
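
The same "first file is the primary file" memo appears in the ImageCard hunk above and again in ImageDetailPanel, ScenePlayer and SceneCard below. It could be factored into a generic hook; usePrimaryFile is a hypothetical name used for illustration, not something this PR adds:

    import { useMemo } from "react";

    // returns files[0] when present, memoised on the owning object
    function usePrimaryFile<T>(obj: { files: T[] }): T | undefined {
      return useMemo(
        () => (obj.files.length > 0 ? obj.files[0] : undefined),
        [obj]
      );
    }

    // usage mirroring the hunks: const file = usePrimaryFile(props.image);
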
    diff --git a/ui/v2.5/src/components/Images/ImageDetails/ImageDetailPanel.tsx b/ui/v2.5/src/components/Images/ImageDetails/ImageDetailPanel.tsx index 1852cd3b1..69202a3d2 100644 --- a/ui/v2.5/src/components/Images/ImageDetails/ImageDetailPanel.tsx +++ b/ui/v2.5/src/components/Images/ImageDetails/ImageDetailPanel.tsx @@ -1,4 +1,4 @@ -import React from "react"; +import React, { useMemo } from "react"; import { Link } from "react-router-dom"; import * as GQL from "src/core/generated-graphql"; import { TextUtils } from "src/utils"; @@ -7,6 +7,7 @@ import { PerformerCard } from "src/components/Performers/PerformerCard"; import { RatingStars } from "src/components/Scenes/SceneDetails/RatingStars"; import { sortPerformers } from "src/core/performers"; import { FormattedMessage, useIntl } from "react-intl"; +import { objectTitle } from "src/core/files"; interface IImageDetailProps { image: GQL.ImageDataFragment; @@ -15,6 +16,11 @@ interface IImageDetailProps { export const ImageDetailPanel: React.FC = (props) => { const intl = useIntl(); + const file = useMemo( + () => (props.image.files.length > 0 ? props.image.files[0] : undefined), + [props.image] + ); + function renderTags() { if (props.image.tags.length === 0) return; const tags = props.image.tags.map((tag) => ( @@ -82,12 +88,7 @@ export const ImageDetailPanel: React.FC = (props) => {

    - +

    {props.image.rating ? ( @@ -99,13 +100,10 @@ export const ImageDetailPanel: React.FC = (props) => { "" )} {renderGalleries()} - {props.image.file.width && props.image.file.height ? ( + {file?.width && file?.height ? (
    :{" "} - {TextUtils.resolution( - props.image.file.width, - props.image.file.height - )} + {TextUtils.resolution(file.width, file.height)}
    ) : ( "" diff --git a/ui/v2.5/src/components/Images/ImageDetails/ImageFileInfoPanel.tsx b/ui/v2.5/src/components/Images/ImageDetails/ImageFileInfoPanel.tsx index cefacb959..c3b6bfded 100644 --- a/ui/v2.5/src/components/Images/ImageDetails/ImageFileInfoPanel.tsx +++ b/ui/v2.5/src/components/Images/ImageDetails/ImageFileInfoPanel.tsx @@ -1,22 +1,24 @@ import React from "react"; +import { Accordion, Card } from "react-bootstrap"; import { FormattedNumber } from "react-intl"; +import { TruncatedText } from "src/components/Shared"; import * as GQL from "src/core/generated-graphql"; import { TextUtils } from "src/utils"; import { TextField, URLField } from "src/utils/field"; -interface IImageFileInfoPanelProps { - image: GQL.ImageDataFragment; +interface IFileInfoPanelProps { + file: GQL.ImageFileDataFragment; } -export const ImageFileInfoPanel: React.FC = ( - props: IImageFileInfoPanelProps +const FileInfoPanel: React.FC = ( + props: IFileInfoPanelProps ) => { function renderFileSize() { - if (props.image.file.size === undefined) { + if (props.file.size === undefined) { return; } - const { size, unit } = TextUtils.fileSize(props.image.file.size ?? 0); + const { size, unit } = TextUtils.fileSize(props.file.size ?? 0); return ( @@ -34,25 +36,55 @@ export const ImageFileInfoPanel: React.FC = ( ); } + const checksum = props.file.fingerprints.find((f) => f.type === "md5"); + return (
    - + {renderFileSize()}
    ); }; +interface IImageFileInfoPanelProps { + image: GQL.ImageDataFragment; +} + +export const ImageFileInfoPanel: React.FC = ( + props: IImageFileInfoPanelProps +) => { + if (props.image.files.length === 0) { + return <>; + } + + if (props.image.files.length === 1) { + return ; + } + + return ( + + {props.image.files.map((file, index) => ( + + + + + + + + + + + ))} + + ); +}; diff --git a/ui/v2.5/src/components/Images/ImageList.tsx b/ui/v2.5/src/components/Images/ImageList.tsx index f285be24f..76b0ccf32 100644 --- a/ui/v2.5/src/components/Images/ImageList.tsx +++ b/ui/v2.5/src/components/Images/ImageList.tsx @@ -10,7 +10,6 @@ import { import * as GQL from "src/core/generated-graphql"; import { queryFindImages } from "src/core/StashService"; import { useImagesList, useLightbox } from "src/hooks"; -import { TextUtils } from "src/utils"; import { ListFilterModel } from "src/models/list-filter/filter"; import { DisplayMode } from "src/models/list-filter/types"; import { @@ -24,6 +23,7 @@ import { EditImagesDialog } from "./EditImagesDialog"; import { DeleteImagesDialog } from "./DeleteImagesDialog"; import "flexbin/flexbin.css"; import { ExportDialog } from "../Shared/ExportDialog"; +import { objectTitle } from "src/core/files"; interface IImageWallProps { images: GQL.SlimImageDataFragment[]; @@ -46,7 +46,7 @@ const ImageWall: React.FC = ({ images, handleImageOpen }) => { src={image.paths.thumbnail ?? ""} loading="lazy" className="gallery-image" - alt={image.title ?? TextUtils.fileNameFromPath(image.path)} + alt={objectTitle(image)} />
    )); diff --git a/ui/v2.5/src/components/Images/styles.scss b/ui/v2.5/src/components/Images/styles.scss index b05223cc8..9216bc92d 100644 --- a/ui/v2.5/src/components/Images/styles.scss +++ b/ui/v2.5/src/components/Images/styles.scss @@ -114,3 +114,16 @@ $imageTabWidth: 450px; height: calc(1.5em + 0.75rem + 2px); } } + +.image-file-card.card { + margin: 0; + padding: 0; + + .card-header { + cursor: pointer; + } + + dl { + margin-bottom: 0; + } +} diff --git a/ui/v2.5/src/components/SceneDuplicateChecker/SceneDuplicateChecker.tsx b/ui/v2.5/src/components/SceneDuplicateChecker/SceneDuplicateChecker.tsx index 557ee43f7..6ef3c2579 100644 --- a/ui/v2.5/src/components/SceneDuplicateChecker/SceneDuplicateChecker.tsx +++ b/ui/v2.5/src/components/SceneDuplicateChecker/SceneDuplicateChecker.tsx @@ -134,10 +134,8 @@ export const SceneDuplicateChecker: React.FC = () => { setEditingScenes(true); } - const renderFilesize = (filesize: string | null | undefined) => { - const { size: parsedSize, unit } = TextUtils.fileSize( - Number.parseInt(filesize ?? "0", 10) - ); + const renderFilesize = (filesize: number | null | undefined) => { + const { size: parsedSize, unit } = TextUtils.fileSize(filesize ?? 0); return ( { {filteredScenes.map((group, groupIndex) => - group.map((scene, i) => ( - <> - {i === 0 && groupIndex !== 0 ? ( - - ) : undefined} - - - - handleCheck(e.currentTarget.checked, scene.id) - } - /> - - - { + const file = + scene.files.length > 0 ? scene.files[0] : undefined; + + return ( + <> + {i === 0 && groupIndex !== 0 ? ( + + ) : undefined} + + + + handleCheck(e.currentTarget.checked, scene.id) + } + /> + + + + } + placement="right" + > - } - placement="right" - > - + + +

    + + {scene.title + ? scene.title + : TextUtils.fileNameFromPath(file?.path ?? "")} + +

    +

    {file?.path ?? ""}

    + + + {maybeRenderPopoverButtonGroup(scene)} + + + {file?.duration && + TextUtils.secondsToTimestamp(file.duration)} + + {renderFilesize(file?.size ?? 0)} + {`${file?.width ?? 0}x${file?.height ?? 0}`} + + -
    - - -

    - - {scene.title ?? - TextUtils.fileNameFromPath(scene.path)} - -

    -

    {scene.path}

    - - - {maybeRenderPopoverButtonGroup(scene)} - - - {scene.file.duration && - TextUtils.secondsToTimestamp(scene.file.duration)} - - {renderFilesize(scene.file.size)} - {`${scene.file.width}x${scene.file.height}`} - - -  mbps - - {scene.file.video_codec} - - - - - - )) +  mbps + + {file?.video_codec ?? ""} + + + + + + ); + }) )} diff --git a/ui/v2.5/src/components/SceneFilenameParser/SceneParserRow.tsx b/ui/v2.5/src/components/SceneFilenameParser/SceneParserRow.tsx index 472b4beba..bd92ec29d 100644 --- a/ui/v2.5/src/components/SceneFilenameParser/SceneParserRow.tsx +++ b/ui/v2.5/src/components/SceneFilenameParser/SceneParserRow.tsx @@ -11,8 +11,8 @@ import { TagSelect, StudioSelect, } from "src/components/Shared"; -import { TextUtils } from "src/utils"; import cx from "classnames"; +import { objectTitle } from "src/core/files"; class ParserResult { public value?: T; @@ -51,7 +51,7 @@ export class SceneParserResult { this.scene = result.scene; this.id = this.scene.id; - this.filename = TextUtils.fileNameFromPath(this.scene.path); + this.filename = objectTitle(this.scene); this.title.setOriginalValue(this.scene.title ?? undefined); this.date.setOriginalValue(this.scene.date ?? undefined); this.rating.setOriginalValue(this.scene.rating ?? undefined); diff --git a/ui/v2.5/src/components/ScenePlayer/ScenePlayer.tsx b/ui/v2.5/src/components/ScenePlayer/ScenePlayer.tsx index 4e4dd2a22..9f5ad7080 100644 --- a/ui/v2.5/src/components/ScenePlayer/ScenePlayer.tsx +++ b/ui/v2.5/src/components/ScenePlayer/ScenePlayer.tsx @@ -3,6 +3,7 @@ import React, { useCallback, useContext, useEffect, + useMemo, useRef, useState, } from "react"; @@ -153,6 +154,11 @@ export const ScenePlayer: React.FC = ({ const started = useRef(false); const interactiveReady = useRef(false); + const file = useMemo( + () => ((scene?.files.length ?? 0) > 0 ? scene?.files[0] : undefined), + [scene] + ); + const maxLoopDuration = config?.maximumLoopDuration ?? 
0; useEffect(() => { @@ -303,7 +309,7 @@ export const ScenePlayer: React.FC = ({ } function handleOffset(player: VideoJsPlayer) { - if (!scene) return; + if (!scene || !file) return; const currentSrc = player.currentSrc(); @@ -312,7 +318,7 @@ export const ScenePlayer: React.FC = ({ const curTime = player.currentTime(); if (!isDirect) { - (player as any).setOffsetDuration(scene.file.duration); + (player as any).setOffsetDuration(file.duration); } else { (player as any).clearOffsetDuration(); } @@ -487,7 +493,7 @@ export const ScenePlayer: React.FC = ({ player.on("loadedmetadata", loadedmetadata); // don't re-initialise the player unless the scene has changed - if (!scene || scene.id === sceneId.current) return; + if (!scene || !file || scene.id === sceneId.current) return; sceneId.current = scene.id; // always stop the interactive client on initialisation @@ -499,10 +505,7 @@ export const ScenePlayer: React.FC = ({ if (!auto && scene.paths?.screenshot) player.poster(scene.paths.screenshot); else player.poster(""); - const isLandscape = - scene.file.height && - scene.file.width && - scene.file.width > scene.file.height; + const isLandscape = file.height && file.width && file.width > file.height; if (isLandscape) { (player as any).landscapeFullscreen({ @@ -551,9 +554,9 @@ export const ScenePlayer: React.FC = ({ player.currentTime(0); const looping = - !!scene.file.duration && + !!file.duration && maxLoopDuration !== 0 && - scene.file.duration < maxLoopDuration; + file.duration < maxLoopDuration; player.loop(looping); interactiveClient.setLooping(looping); @@ -588,6 +591,7 @@ export const ScenePlayer: React.FC = ({ }; }, [ scene, + file, config?.autostartVideo, maxLoopDuration, initialTimestamp, @@ -649,10 +653,7 @@ export const ScenePlayer: React.FC = ({ }; const isPortrait = - scene && - scene.file.height && - scene.file.width && - scene.file.height > scene.file.width; + scene && file && file.height && file.width && file.height > file.width; return (
    @@ -667,8 +668,9 @@ export const ScenePlayer: React.FC = ({ {scene?.interactive && (interactiveState !== ConnectionState.Ready || playerRef.current?.paused()) && } - {scene && ( + {scene && file && ( void; @@ -133,14 +134,14 @@ export const ScenePlayerScrubber: React.FC = ( if (!scrubberSliderEl.current) { return; } - const duration = Number(props.scene.file.duration); + const duration = Number(props.file.duration); const percentage = props.position / duration; const position = (scrubberSliderEl.current.scrollWidth * percentage - scrubberSliderEl.current.clientWidth / 2) * -1; setPosition(position, false); - }, [props.position, props.scene.file.duration, setPosition]); + }, [props.position, props.file.duration, setPosition]); useEffect(() => { window.addEventListener("mouseup", onMouseUp, false); @@ -193,7 +194,7 @@ export const ScenePlayerScrubber: React.FC = ( const offset = target.offsetLeft + target.clientWidth * spritePercentage; const percentage = offset / scrubberSliderEl.current.scrollWidth; - seekSeconds = percentage * (props.scene.file.duration || 0); + seekSeconds = percentage * (props.file.duration || 0); } const markerIdString = target.getAttribute("data-marker-id"); @@ -287,7 +288,7 @@ export const ScenePlayerScrubber: React.FC = ( } const marker = props.scene.scene_markers[i]; - const duration = Number(props.scene.file.duration); + const duration = Number(props.file.duration); const percentage = marker.seconds / duration; const left = diff --git a/ui/v2.5/src/components/Scenes/DeleteScenesDialog.tsx b/ui/v2.5/src/components/Scenes/DeleteScenesDialog.tsx index 208f2c191..c98785e1c 100644 --- a/ui/v2.5/src/components/Scenes/DeleteScenesDialog.tsx +++ b/ui/v2.5/src/components/Scenes/DeleteScenesDialog.tsx @@ -7,6 +7,7 @@ import { useToast } from "src/hooks"; import { ConfigurationContext } from "src/hooks/Config"; import { FormattedMessage, useIntl } from "react-intl"; import { faTrashAlt } from "@fortawesome/free-solid-svg-icons"; +import { objectPath } from "src/core/files"; interface IDeleteSceneDialogProps { selected: GQL.SlimSceneDataFragment[]; @@ -69,13 +70,13 @@ export const DeleteScenesDialog: React.FC = ( setIsDeleting(false); } - function funscriptPath(scenePath: string) { - const extIndex = scenePath.lastIndexOf("."); + function funscriptPath(sp: string) { + const extIndex = sp.lastIndexOf("."); if (extIndex !== -1) { - return scenePath.substring(0, extIndex + 1) + "funscript"; + return sp.substring(0, extIndex + 1) + "funscript"; } - return scenePath; + return sp; } function maybeRenderDeleteFileAlert() { @@ -86,9 +87,10 @@ export const DeleteScenesDialog: React.FC = ( const deletedFiles: string[] = []; props.selected.forEach((s) => { - deletedFiles.push(s.path); - if (s.interactive) { - deletedFiles.push(funscriptPath(s.path)); + const paths = s.files.map((f) => f.path); + deletedFiles.push(...paths); + if (s.interactive && s.files.length) { + deletedFiles.push(funscriptPath(objectPath(s))); } }); @@ -97,7 +99,7 @@ export const DeleteScenesDialog: React.FC = (

    = ( ) => { const { configuration } = React.useContext(ConfigurationContext); + const file = useMemo( + () => (props.scene.files.length > 0 ? props.scene.files[0] : undefined), + [props.scene] + ); + // studio image is missing if it uses the default const missingStudioImage = props.scene.studio?.image_path?.endsWith( "?default=true" @@ -101,8 +107,8 @@ export const SceneCard: React.FC = ( function maybeRenderSceneSpecsOverlay() { let sizeObj = null; - if (props.scene.file.size) { - sizeObj = TextUtils.fileSize(parseInt(props.scene.file.size)); + if (file?.size) { + sizeObj = TextUtils.fileSize(file.size); } return (

    @@ -119,19 +125,16 @@ export const SceneCard: React.FC = ( ) : ( "" )} - {props.scene.file.width && props.scene.file.height ? ( + {file?.width && file?.height ? ( {" "} - {TextUtils.resolution( - props.scene.file.width, - props.scene.file.height - )} + {TextUtils.resolution(file?.width, file?.height)} ) : ( "" )} - {(props.scene.file.duration ?? 0) >= 1 - ? TextUtils.secondsToTimestamp(props.scene.file.duration ?? 0) + {(file?.duration ?? 0) >= 1 + ? TextUtils.secondsToTimestamp(file?.duration ?? 0) : ""}
    ); @@ -300,11 +303,15 @@ export const SceneCard: React.FC = ( } function maybeRenderDupeCopies() { - if (props.scene.phash) { + const phash = file + ? file.fingerprints.find((fp) => fp.type === "phash") + : undefined; + + if (phash) { return (
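
The SceneCard hunk above no longer reads scene.phash directly: fingerprints now hang off each file as typed { type, value } entries, and the card looks up the "phash" entry on the primary file. A small helper capturing that lookup pattern; the helper itself is illustrative, not part of the PR:

    interface Fingerprint {
      type: string;
      value: string;
    }

    // find a fingerprint of the given type on a (possibly missing) file;
    // the type strings "phash", "oshash" and "md5" all appear in hunks above
    function fingerprintValue(
      file: { fingerprints: Fingerprint[] } | undefined,
      type: "phash" | "oshash" | "md5"
    ): string | undefined {
      return file?.fingerprints.find((fp) => fp.type === type)?.value;
    }
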
    diff --git a/ui/v2.5/src/components/Scenes/SceneDetails/Scene.tsx b/ui/v2.5/src/components/Scenes/SceneDetails/Scene.tsx index b036b2b17..49e3ba718 100644 --- a/ui/v2.5/src/components/Scenes/SceneDetails/Scene.tsx +++ b/ui/v2.5/src/components/Scenes/SceneDetails/Scene.tsx @@ -19,9 +19,8 @@ import { import Icon from "src/components/Shared/Icon"; import { useToast } from "src/hooks"; -import SceneQueue from "src/models/sceneQueue"; +import SceneQueue, { QueuedScene } from "src/models/sceneQueue"; import { ListFilterModel } from "src/models/list-filter/filter"; -import TextUtils from "src/utils/text"; import Mousetrap from "mousetrap"; import { OCounterButton } from "./OCounterButton"; import { OrganizedButton } from "./OrganizedButton"; @@ -51,12 +50,13 @@ const SceneGalleriesPanel = lazy(() => import("./SceneGalleriesPanel")); const DeleteScenesDialog = lazy(() => import("../DeleteScenesDialog")); const GenerateDialog = lazy(() => import("../../Dialogs/GenerateDialog")); const SceneVideoFilterPanel = lazy(() => import("./SceneVideoFilterPanel")); +import { objectPath, objectTitle } from "src/core/files"; interface IProps { scene: GQL.SceneDataFragment; refetch: () => void; setTimestamp: (num: number) => void; - queueScenes: GQL.SceneDataFragment[]; + queueScenes: QueuedScene[]; onQueueNext: () => void; onQueuePrevious: () => void; onQueueRandom: () => void; @@ -185,7 +185,7 @@ const ScenePage: React.FC = ({ async function onRescan() { await mutateMetadataScan({ - paths: [scene.path], + paths: [objectPath(scene)], }); Toast.success({ @@ -452,10 +452,12 @@ const ScenePage: React.FC = ({ return collapsed ? ">" : "<"; } + const title = objectTitle(scene); + return ( <> - {scene.title ?? TextUtils.fileNameFromPath(scene.path)} + {title} {maybeRenderSceneGenerateDialog()} {maybeRenderDeleteDialog()} @@ -476,9 +478,7 @@ const ScenePage: React.FC = ({ )} -

    - {scene.title ?? TextUtils.fileNameFromPath(scene.path)} -

    +

    {title}

    {renderTabs()}
    @@ -519,7 +519,7 @@ const SceneLoader: React.FC = () => { () => SceneQueue.fromQueryParameters(location.search), [location.search] ); - const [queueScenes, setQueueScenes] = useState([]); + const [queueScenes, setQueueScenes] = useState([]); const [queueTotal, setQueueTotal] = useState(0); const [queueStart, setQueueStart] = useState(1); @@ -592,7 +592,7 @@ const SceneLoader: React.FC = () => { const { scenes } = query.data.findScenes; // prepend scenes to scene list - const newScenes = scenes.concat(queueScenes); + const newScenes = (scenes as QueuedScene[]).concat(queueScenes); setQueueScenes(newScenes); setQueueStart(newStart); } @@ -613,7 +613,7 @@ const SceneLoader: React.FC = () => { const { scenes } = query.data.findScenes; // append scenes to scene list - const newScenes = scenes.concat(queueScenes); + const newScenes = (scenes as QueuedScene[]).concat(queueScenes); setQueueScenes(newScenes); // don't change queue start } diff --git a/ui/v2.5/src/components/Scenes/SceneDetails/SceneDetailPanel.tsx b/ui/v2.5/src/components/Scenes/SceneDetails/SceneDetailPanel.tsx index 1f4736e2c..1b4a18f5c 100644 --- a/ui/v2.5/src/components/Scenes/SceneDetails/SceneDetailPanel.tsx +++ b/ui/v2.5/src/components/Scenes/SceneDetails/SceneDetailPanel.tsx @@ -1,4 +1,4 @@ -import React from "react"; +import React, { useMemo } from "react"; import { Link } from "react-router-dom"; import { FormattedDate, FormattedMessage, useIntl } from "react-intl"; import * as GQL from "src/core/generated-graphql"; @@ -8,6 +8,7 @@ import TruncatedText from "src/components/Shared/TruncatedText"; import { PerformerCard } from "src/components/Performers/PerformerCard"; import { sortPerformers } from "src/core/performers"; import { RatingStars } from "./RatingStars"; +import { objectTitle } from "src/core/files"; interface ISceneDetailProps { scene: GQL.SceneDataFragment; @@ -16,6 +17,11 @@ interface ISceneDetailProps { export const SceneDetailPanel: React.FC = (props) => { const intl = useIntl(); + const file = useMemo( + () => (props.scene.files.length > 0 ? props.scene.files[0] : undefined), + [props.scene] + ); + function renderDetails() { if (!props.scene.details || props.scene.details === "") return; return ( @@ -81,12 +87,7 @@ export const SceneDetailPanel: React.FC = (props) => {

    - +

    {props.scene.date ? ( @@ -106,13 +107,10 @@ export const SceneDetailPanel: React.FC = (props) => { ) : ( "" )} - {props.scene.file.width && props.scene.file.height && ( + {file?.width && file?.height && (
    :{" "} - {TextUtils.resolution( - props.scene.file.width, - props.scene.file.height - )} + {TextUtils.resolution(file.width, file.height)}
    )}
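
SceneDetailPanel, like the image panels earlier in the diff, renders the resolution row only when the primary file reports both dimensions, then formats them with TextUtils.resolution. A rough stand-in for that formatter, with assumed thresholds (the real mapping lives in src/utils/text and may differ):

    function resolution(width: number, height: number): string {
      const dim = Math.min(width, height); // short edge, so portrait files match too
      if (dim >= 2160) return "4K";
      if (dim >= 1080) return "1080p";
      if (dim >= 720) return "720p";
      if (dim >= 480) return "480p";
      return `${width}x${height}`;
    }
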
    diff --git a/ui/v2.5/src/components/Scenes/SceneDetails/SceneEditPanel.tsx b/ui/v2.5/src/components/Scenes/SceneDetails/SceneEditPanel.tsx index 94d9687b8..f1c3f93b7 100644 --- a/ui/v2.5/src/components/Scenes/SceneDetails/SceneEditPanel.tsx +++ b/ui/v2.5/src/components/Scenes/SceneDetails/SceneEditPanel.tsx @@ -31,7 +31,7 @@ import { URLField, } from "src/components/Shared"; import useToast from "src/hooks/Toast"; -import { ImageUtils, FormUtils, TextUtils, getStashIDs } from "src/utils"; +import { ImageUtils, FormUtils, getStashIDs } from "src/utils"; import { MovieSelect } from "src/components/Shared/Select"; import { useFormik } from "formik"; import { Prompt } from "react-router-dom"; @@ -44,6 +44,7 @@ import { faSyncAlt, faTrashAlt, } from "@fortawesome/free-solid-svg-icons"; +import { objectTitle } from "src/core/files"; const SceneScrapeDialog = lazy(() => import("./SceneScrapeDialog")); const SceneQueryModal = lazy(() => import("./SceneQueryModal")); @@ -65,7 +66,7 @@ export const SceneEditPanel: React.FC = ({ const [galleries, setGalleries] = useState<{ id: string; title: string }[]>( scene.galleries.map((g) => ({ id: g.id, - title: g.title ?? TextUtils.fileNameFromPath(g.path ?? ""), + title: objectTitle(g), })) ); diff --git a/ui/v2.5/src/components/Scenes/SceneDetails/SceneFileInfoPanel.tsx b/ui/v2.5/src/components/Scenes/SceneDetails/SceneFileInfoPanel.tsx index f8c1c3063..e70be6428 100644 --- a/ui/v2.5/src/components/Scenes/SceneDetails/SceneFileInfoPanel.tsx +++ b/ui/v2.5/src/components/Scenes/SceneDetails/SceneFileInfoPanel.tsx @@ -1,26 +1,22 @@ -import React from "react"; +import React, { useMemo } from "react"; +import { Accordion, Card } from "react-bootstrap"; import { FormattedMessage, FormattedNumber, useIntl } from "react-intl"; +import { TruncatedText } from "src/components/Shared"; import * as GQL from "src/core/generated-graphql"; import { NavUtils, TextUtils, getStashboxBase } from "src/utils"; import { TextField, URLField } from "src/utils/field"; -interface ISceneFileInfoPanelProps { - scene: GQL.SceneDataFragment; +interface IFileInfoPanelProps { + file: GQL.VideoFileDataFragment; } -export const SceneFileInfoPanel: React.FC = ( - props: ISceneFileInfoPanelProps +const FileInfoPanel: React.FC = ( + props: IFileInfoPanelProps ) => { const intl = useIntl(); function renderFileSize() { - if (props.scene.file.size === undefined) { - return; - } - - const { size, unit } = TextUtils.fileSize( - Number.parseInt(props.scene.file.size ?? "0", 10) - ); + const { size, unit } = TextUtils.fileSize(props.file.size); return ( @@ -38,6 +34,78 @@ export const SceneFileInfoPanel: React.FC = ( ); } + // TODO - generalise fingerprints + const oshash = props.file.fingerprints.find((f) => f.type === "oshash"); + const phash = props.file.fingerprints.find((f) => f.type === "phash"); + const checksum = props.file.fingerprints.find((f) => f.type === "md5"); + + return ( +
    + + + + + {renderFileSize()} + + + + + + + + + + +
    + ); +}; + +interface ISceneFileInfoPanelProps { + scene: GQL.SceneDataFragment; +} + +export const SceneFileInfoPanel: React.FC = ( + props: ISceneFileInfoPanelProps +) => { function renderStashIDs() { if (!props.scene.stash_ids.length) { return; @@ -96,83 +164,55 @@ export const SceneFileInfoPanel: React.FC = ( } } + const filesPanel = useMemo(() => { + if (props.scene.files.length === 0) { + return; + } + + if (props.scene.files.length === 1) { + return ; + } + + return ( + + {props.scene.files.map((file, index) => ( + + + + + + + + + + + ))} + + ); + }, [props.scene]); + return ( -
    - - - - - - {renderFunscript()} - {renderInteractiveSpeed()} - {renderFileSize()} - - - - +
    + - - - - - - - - {renderStashIDs()} -
    + {renderStashIDs()} +
    + + {filesPanel} + ); }; diff --git a/ui/v2.5/src/components/Scenes/SceneDetails/SceneVideoFilterPanel.tsx b/ui/v2.5/src/components/Scenes/SceneDetails/SceneVideoFilterPanel.tsx index f45c2bfae..746a0acb6 100644 --- a/ui/v2.5/src/components/Scenes/SceneDetails/SceneVideoFilterPanel.tsx +++ b/ui/v2.5/src/components/Scenes/SceneDetails/SceneVideoFilterPanel.tsx @@ -505,9 +505,12 @@ export const SceneVideoFilterPanel: React.FC = ( setRotateValue(3); } + const file = + props.scene.files.length > 0 ? props.scene.files[0] : undefined; + // Calculate Required Scaling. - const sceneWidth = props.scene.file.width ?? 1; - const sceneHeight = props.scene.file.height ?? 1; + const sceneWidth = file?.width ?? 1; + const sceneHeight = file?.height ?? 1; const sceneAspectRatio = sceneWidth / sceneHeight; const sceneNewAspectRatio = sceneHeight / sceneWidth; diff --git a/ui/v2.5/src/components/Scenes/SceneListTable.tsx b/ui/v2.5/src/components/Scenes/SceneListTable.tsx index b4b7803b0..21af51dd1 100644 --- a/ui/v2.5/src/components/Scenes/SceneListTable.tsx +++ b/ui/v2.5/src/components/Scenes/SceneListTable.tsx @@ -7,6 +7,7 @@ import * as GQL from "src/core/generated-graphql"; import { NavUtils, TextUtils } from "src/utils"; import { Icon } from "src/components/Shared"; import { FormattedMessage } from "react-intl"; +import { objectTitle } from "src/core/files"; interface ISceneListTableProps { scenes: GQL.SlimSceneDataFragment[]; @@ -47,6 +48,10 @@ export const SceneListTable: React.FC = ( : `/scenes/${scene.id}`; let shiftKey = false; + + const file = scene.files.length > 0 ? scene.files[0] : undefined; + + const title = objectTitle(scene); return ( @@ -73,21 +78,18 @@ export const SceneListTable: React.FC = ( {scene.title -
    {scene.title ?? TextUtils.fileNameFromPath(scene.path)}
    +
    {title}
    {scene.rating ? scene.rating : ""} - - {scene.file.duration && - TextUtils.secondsToTimestamp(scene.file.duration)} - + {file?.duration && TextUtils.secondsToTimestamp(file.duration)} {renderTags(scene.tags)} {renderPerformers(scene.performers)} diff --git a/ui/v2.5/src/components/Scenes/styles.scss b/ui/v2.5/src/components/Scenes/styles.scss index e82106878..5bce688ae 100644 --- a/ui/v2.5/src/components/Scenes/styles.scss +++ b/ui/v2.5/src/components/Scenes/styles.scss @@ -73,12 +73,6 @@ } } -.file-info-panel { - div { - margin-bottom: 0.5rem; - } -} - textarea.scene-description { min-height: 150px; } @@ -631,3 +625,16 @@ input[type="range"].blue-slider { cursor: pointer; } } + +.scene-file-card.card { + margin: 0; + padding: 0; + + .card-header { + cursor: pointer; + } + + dl { + margin-bottom: 0; + } +} diff --git a/ui/v2.5/src/components/Settings/Settings.tsx b/ui/v2.5/src/components/Settings/Settings.tsx index 56d526bd7..1a408dcc1 100644 --- a/ui/v2.5/src/components/Settings/Settings.tsx +++ b/ui/v2.5/src/components/Settings/Settings.tsx @@ -17,6 +17,7 @@ import { SettingsServicesPanel } from "./SettingsServicesPanel"; import { SettingsContext } from "./context"; import { SettingsLibraryPanel } from "./SettingsLibraryPanel"; import { SettingsSecurityPanel } from "./SettingsSecurityPanel"; +import Changelog from "../Changelog/Changelog"; export const Settings: React.FC = () => { const intl = useIntl(); @@ -92,6 +93,11 @@ export const Settings: React.FC = () => { + + + + + @@ -138,6 +144,9 @@ export const Settings: React.FC = () => { + + + diff --git a/ui/v2.5/src/components/Settings/Tasks/ScanOptions.tsx b/ui/v2.5/src/components/Settings/Tasks/ScanOptions.tsx index 0e01cfaa0..2f9497ee3 100644 --- a/ui/v2.5/src/components/Settings/Tasks/ScanOptions.tsx +++ b/ui/v2.5/src/components/Settings/Tasks/ScanOptions.tsx @@ -12,8 +12,6 @@ export const ScanOptions: React.FC = ({ setOptions: setOptionsState, }) => { const { - useFileMetadata, - stripFileExtension, scanGeneratePreviews, scanGenerateImagePreviews, scanGenerateSprites, @@ -63,18 +61,6 @@ export const ScanOptions: React.FC = ({ headingID="config.tasks.generate_thumbnails_during_scan" onChange={(v) => setOptions({ scanGenerateThumbnails: v })} /> - setOptions({ stripFileExtension: v })} - /> - setOptions({ useFileMetadata: v })} - /> ); }; diff --git a/ui/v2.5/src/components/Setup/Migrate.tsx b/ui/v2.5/src/components/Setup/Migrate.tsx index 6dd6851ec..3b2226a71 100644 --- a/ui/v2.5/src/components/Setup/Migrate.tsx +++ b/ui/v2.5/src/components/Setup/Migrate.tsx @@ -1,9 +1,11 @@ -import React, { useEffect, useState } from "react"; +import React, { useEffect, useMemo, useState } from "react"; import { Button, Card, Container, Form } from "react-bootstrap"; import { useIntl, FormattedMessage } from "react-intl"; import * as GQL from "src/core/generated-graphql"; import { useSystemStatus, mutateMigrate } from "src/core/StashService"; +import { migrationNotes } from "src/docs/en/MigrationNotes"; import { LoadingIndicator } from "../Shared"; +import { MarkdownPage } from "../Shared/MarkdownPage"; export const Migrate: React.FC = () => { const { data: systemStatus, loading } = useSystemStatus(); @@ -45,8 +47,46 @@ export const Migrate: React.FC = () => { } }, [defaultBackupPath, backupPath]); + const status = systemStatus?.systemStatus; + + const maybeMigrationNotes = useMemo(() => { + if ( + !status || + status.databaseSchema === undefined || + status.databaseSchema === null || + status.appSchema === undefined || + status.appSchema === 
null + ) + return; + + const notes = []; + for (let i = status.databaseSchema + 1; i <= status.appSchema; ++i) { + const note = migrationNotes[i]; + if (note) { + notes.push(note); + } + } + + if (notes.length === 0) return; + + return ( +
    +

    + +

    +
    + {notes.map((n, i) => ( +
    + +
    + ))} +
    +
    + ); + }, [status]); + // only display setup wizard if system is not setup - if (loading || !systemStatus) { + if (loading || !systemStatus || !status) { return ; } @@ -67,8 +107,6 @@ export const Migrate: React.FC = () => { return ; } - const status = systemStatus.systemStatus; - async function onMigrate() { try { setMigrateLoading(true); @@ -148,6 +186,8 @@ export const Migrate: React.FC = () => {

    + {maybeMigrationNotes} +
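
The Migrate hunk above walks every schema step between the on-disk database schema and the schema the app expects, collecting at most one note per step. Restated as a standalone function: the NotesIndex shape mirrors how migrationNotes[i] is indexed above, while the function name is illustrative:

    type NotesIndex<T> = { [schema: number]: T };

    function collectMigrationNotes<T>(
      databaseSchema: number,
      appSchema: number,
      notes: NotesIndex<T>
    ): T[] {
      const out: T[] = [];
      // strictly after the current schema, up to and including the target
      for (let i = databaseSchema + 1; i <= appSchema; ++i) {
        const note = notes[i];
        if (note) out.push(note);
      }
      return out;
    }

For example, migrating a schema-31 database on an app expecting schema 32 yields at most the single note registered under 32.
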
    diff --git a/ui/v2.5/src/components/Setup/styles.scss b/ui/v2.5/src/components/Setup/styles.scss new file mode 100644 index 000000000..e10d67c62 --- /dev/null +++ b/ui/v2.5/src/components/Setup/styles.scss @@ -0,0 +1,9 @@ +.migration-notes { + margin: 1rem; + + > div { + background-color: darken($color: $card-bg, $amount: 3); + border-radius: 3px; + padding: 16px; + } +} diff --git a/ui/v2.5/src/components/Shared/Select.tsx b/ui/v2.5/src/components/Shared/Select.tsx index bc494cfc9..f90a69432 100644 --- a/ui/v2.5/src/components/Shared/Select.tsx +++ b/ui/v2.5/src/components/Shared/Select.tsx @@ -22,10 +22,11 @@ import { usePerformerCreate, } from "src/core/StashService"; import { useToast } from "src/hooks"; -import { TextUtils } from "src/utils"; import { SelectComponents } from "react-select/src/components"; import { ConfigurationContext } from "src/hooks/Config"; import { useIntl } from "react-intl"; +import { objectTitle } from "src/core/files"; +import { galleryTitle } from "src/core/galleries"; export type ValidTypes = | GQL.SlimPerformerDataFragment @@ -277,7 +278,7 @@ export const GallerySelect: React.FC = (props) => { const galleries = data?.findGalleries.galleries ?? []; const items = galleries.map((g) => ({ - label: g.title ?? TextUtils.fileNameFromPath(g.path ?? ""), + label: galleryTitle(g), value: g.id, })); @@ -328,7 +329,7 @@ export const SceneSelect: React.FC = (props) => { const scenes = data?.findScenes.scenes ?? []; const items = scenes.map((s) => ({ - label: s.title ?? TextUtils.fileNameFromPath(s.path ?? ""), + label: objectTitle(s), value: s.id, })); diff --git a/ui/v2.5/src/components/Shared/TagLink.tsx b/ui/v2.5/src/components/Shared/TagLink.tsx index e5e7d8fb7..441c0c3ac 100644 --- a/ui/v2.5/src/components/Shared/TagLink.tsx +++ b/ui/v2.5/src/components/Shared/TagLink.tsx @@ -8,10 +8,22 @@ import { TagDataFragment, MovieDataFragment, SceneDataFragment, - GalleryDataFragment, } from "src/core/generated-graphql"; import NavUtils from "src/utils/navigation"; import TextUtils from "src/utils/text"; +import { objectTitle } from "src/core/files"; +import { galleryTitle } from "src/core/galleries"; +import * as GQL from "src/core/generated-graphql"; + +interface IFile { + path: string; +} +interface IGallery { + id: string; + files: IFile[]; + folder?: GQL.Maybe; + title: GQL.Maybe; +} interface IProps { tag?: Partial; @@ -19,8 +31,8 @@ interface IProps { performer?: Partial; marker?: Partial; movie?: Partial; - scene?: Partial; - gallery?: Partial; + scene?: Partial>; + gallery?: Partial; className?: string; } @@ -57,14 +69,10 @@ export const TagLink: React.FC = (props: IProps) => { } - ${TextUtils.secondsToTimestamp(props.marker.seconds || 0)}`; } else if (props.gallery) { link = `/galleries/${props.gallery.id}`; - title = props.gallery.title - ? props.gallery.title - : TextUtils.fileNameFromPath(props.gallery.path ?? ""); + title = galleryTitle(props.gallery); } else if (props.scene) { link = `/scenes/${props.scene.id}`; - title = props.scene.title - ? props.scene.title - : TextUtils.fileNameFromPath(props.scene.path ?? 
""); + title = objectTitle(props.scene); } return ( diff --git a/ui/v2.5/src/components/Stats.tsx b/ui/v2.5/src/components/Stats.tsx index 69007f122..7cc17b59a 100644 --- a/ui/v2.5/src/components/Stats.tsx +++ b/ui/v2.5/src/components/Stats.tsx @@ -2,7 +2,6 @@ import React from "react"; import { useStats } from "src/core/StashService"; import { FormattedMessage, FormattedNumber } from "react-intl"; import { LoadingIndicator } from "src/components/Shared"; -import Changelog from "src/components/Changelog/Changelog"; import { TextUtils } from "src/utils"; export const Stats: React.FC = () => { @@ -115,9 +114,6 @@ export const Stats: React.FC = () => {

    -
    - -
); }; diff --git a/ui/v2.5/src/components/Tagger/scenes/StashSearchResult.tsx b/ui/v2.5/src/components/Tagger/scenes/StashSearchResult.tsx index 33b6887f9..e4a32bc58 100755 --- a/ui/v2.5/src/components/Tagger/scenes/StashSearchResult.tsx +++ b/ui/v2.5/src/components/Tagger/scenes/StashSearchResult.tsx @@ -74,30 +74,65 @@ const getDurationStatus = ( ); }; +function matchPhashes( + scenePhashes: Pick<GQL.Fingerprint, "type" | "value">[], + fingerprints: GQL.StashBoxFingerprint[] +) { + const phashes = fingerprints.filter((f) => f.algorithm === "PHASH"); + + const matches: { [key: string]: number } = {}; + phashes.forEach((p) => { + let bestMatch = -1; + scenePhashes.forEach((fp) => { + const d = distance(p.hash, fp.value); + + if (d <= 8 && (bestMatch === -1 || d < bestMatch)) { + bestMatch = d; + } + }); + + if (bestMatch !== -1) { + matches[p.hash] = bestMatch; + } + }); + + return matches; +} + const getFingerprintStatus = ( scene: IScrapedScene, stashScene: GQL.SlimSceneDataFragment ) => { - const checksumMatch = scene.fingerprints?.some( - (f) => f.hash === stashScene.checksum || f.hash === stashScene.oshash + const checksumMatch = scene.fingerprints?.some((f) => + stashScene.files.some((ff) => + ff.fingerprints.some( + (fp) => + fp.value === f.hash && (fp.type === "oshash" || fp.type === "md5") + ) + ) ); - const phashMatches = stashScene.phash - ? scene.fingerprints?.filter( - (f) => - f.algorithm === "PHASH" && distance(f.hash, stashScene.phash) <= 8 - ) ?? [] - : []; + + const allPhashes = stashScene.files.reduce( + (pv: Pick<GQL.Fingerprint, "type" | "value">[], cv) => { + return [...pv, ...cv.fingerprints.filter((f) => f.type === "phash")]; + }, + [] + ); + + const phashMatches = matchPhashes(allPhashes, scene.fingerprints ?? []); const phashList = (
    - {phashMatches.map((fp) => ( -
    - {fp.hash} - {fp.hash === stashScene.phash - ? ", Exact match" - : `, distance ${distance(fp.hash, stashScene.phash)}`} -
    - ))} + {Object.entries(phashMatches).map((fp) => { + const hash = fp[0]; + const d = fp[1]; + return ( +
    + {hash} + {d === 0 ? ", Exact match" : `, distance ${d}`} +
    + ); + })}
    ); @@ -624,6 +659,9 @@ const StashSearchResult: React.FC = ({ return ; } + const stashSceneFile = + stashScene.files.length > 0 ? stashScene.files[0] : undefined; + return ( <>
    @@ -640,7 +678,7 @@ const StashSearchResult: React.FC = ({ )} {maybeRenderDateField()} - {getDurationStatus(scene, stashScene.file?.duration)} + {getDurationStatus(scene, stashSceneFile?.duration)} {getFingerprintStatus(scene, stashScene)}
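The fingerprint comparison above replaces the old single `stashScene.phash` check: every phash from every file of the scene is compared against each stash-box `PHASH` fingerprint, and the lowest Hamming distance is kept when it is within the threshold of 8. A minimal sketch of that distance metric, assuming 64-bit phashes encoded as hex strings; the `distance` helper imported by this file may be implemented differently:

```ts
// Hamming distance between two 64-bit perceptual hashes in hex form:
// XOR the values, then count the set bits.
function hammingDistance(a: string, b: string): number {
  let x = BigInt(`0x${a}`) ^ BigInt(`0x${b}`);
  let bits = 0;
  while (x > 0n) {
    bits += Number(x & 1n);
    x >>= 1n;
  }
  return bits;
}

// One differing bit gives distance 1: within the threshold of 8, so it is
// reported as a near match; distance 0 would be an exact match.
console.log(hammingDistance("8000000000000000", "8000000000000001")); // 1
```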
    diff --git a/ui/v2.5/src/components/Tagger/scenes/TaggerScene.tsx b/ui/v2.5/src/components/Tagger/scenes/TaggerScene.tsx index 87755c3b5..4a4c9de44 100644 --- a/ui/v2.5/src/components/Tagger/scenes/TaggerScene.tsx +++ b/ui/v2.5/src/components/Tagger/scenes/TaggerScene.tsx @@ -1,4 +1,4 @@ -import React, { useState, useContext, PropsWithChildren } from "react"; +import React, { useState, useContext, PropsWithChildren, useMemo } from "react"; import * as GQL from "src/core/generated-graphql"; import { Link } from "react-router-dom"; import { Button, Collapse, Form, InputGroup } from "react-bootstrap"; @@ -15,6 +15,7 @@ import { parsePath, prepareQueryString } from "src/components/Tagger/utils"; import { ScenePreview } from "src/components/Scenes/SceneCard"; import { TaggerStateContext } from "../context"; import { faChevronDown, faChevronUp } from "@fortawesome/free-solid-svg-icons"; +import { objectPath, objectTitle } from "src/core/files"; interface ITaggerSceneDetails { scene: GQL.SlimSceneDataFragment; @@ -29,7 +30,7 @@ const TaggerSceneDetails: React.FC = ({ scene }) => {
    -

    {scene.title}

    +

    {objectTitle(scene)}

    {scene.studio?.name} {scene.studio?.name && scene.date && ` • `} @@ -100,17 +101,22 @@ export const TaggerScene: React.FC> = ({ const [queryString, setQueryString] = useState(""); const [queryLoading, setQueryLoading] = useState(false); - const { paths, file } = parsePath(scene.path); + const { paths, file: basename } = parsePath(objectPath(scene)); const defaultQueryString = prepareQueryString( scene, paths, - file, + basename, config.mode, config.blacklist ); - const width = scene.file.width ? scene.file.width : 0; - const height = scene.file.height ? scene.file.height : 0; + const file = useMemo( + () => (scene.files.length > 0 ? scene.files[0] : undefined), + [scene] + ); + + const width = file?.width ? file.width : 0; + const height = file?.height ? file.height : 0; const isPortrait = height > width; async function query() { @@ -197,7 +203,7 @@ export const TaggerScene: React.FC> = ({
    - +
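The `objectTitle`/`objectPath` helpers used above are defined in `src/core/files.ts` just below. A quick usage sketch, assuming a scene shaped like the fragments used here and that `TextUtils.fileNameFromPath` returns the basename (the v0.17 notes state that titles now fall back to the file basename):

```ts
import { objectTitle, objectPath } from "src/core/files";

// Hypothetical scene with no explicit title and a single file.
const scene = {
  title: null,
  files: [{ path: "/media/videos/example-scene.mp4" }],
};

objectTitle(scene); // "example-scene.mp4", basename of the first file
objectPath(scene); // "/media/videos/example-scene.mp4", the first file's path
```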
diff --git a/ui/v2.5/src/core/config.ts b/ui/v2.5/src/core/config.ts index 007d70e32..7d0840f60 100644 --- a/ui/v2.5/src/core/config.ts +++ b/ui/v2.5/src/core/config.ts @@ -27,6 +27,7 @@ export type FrontPageContent = ISavedFilterRow | ICustomFilter; export interface IUIConfig { frontPageContent?: FrontPageContent[]; + lastNoteSeen?: number; } function recentlyReleased( diff --git a/ui/v2.5/src/core/files.ts b/ui/v2.5/src/core/files.ts new file mode 100644 index 000000000..78095bf65 --- /dev/null +++ b/ui/v2.5/src/core/files.ts @@ -0,0 +1,31 @@ +import { TextUtils } from "src/utils"; +import * as GQL from "src/core/generated-graphql"; + +interface IFile { + path: string; +} + +interface IObjectWithFiles { + files: IFile[]; +} + +interface IObjectWithTitleFiles extends IObjectWithFiles { + title: GQL.Maybe<string>; +} + +export function objectTitle(s: Partial<IObjectWithTitleFiles>) { + if (s.title) { + return s.title; + } + if (s.files && s.files.length > 0) { + return TextUtils.fileNameFromPath(s.files[0].path); + } + return ""; +} + +export function objectPath(s: IObjectWithFiles) { + if (s.files && s.files.length > 0) { + return s.files[0].path; + } + return ""; +} diff --git a/ui/v2.5/src/core/galleries.ts b/ui/v2.5/src/core/galleries.ts new file mode 100644 index 000000000..21bf662cd --- /dev/null +++ b/ui/v2.5/src/core/galleries.ts @@ -0,0 +1,38 @@ +import { TextUtils } from "src/utils"; +import * as GQL from "src/core/generated-graphql"; + +interface IFile { + path: string; +} + +interface IGallery { + files: IFile[]; + folder?: GQL.Maybe<IFile>; +} + +interface IGalleryWithTitle extends IGallery { + title: GQL.Maybe<string>; +} + +export function galleryTitle(s: Partial<IGalleryWithTitle>) { + if (s.title) { + return s.title; + } + if (s.files && s.files.length > 0) { + return TextUtils.fileNameFromPath(s.files[0].path); + } + if (s.folder) { + return TextUtils.fileNameFromPath(s.folder.path); + } + return ""; +} + +export function galleryPath(s: IGallery) { + if (s.files && s.files.length > 0) { + return s.files[0].path; + } + if (s.folder) { + return s.folder.path; + } + return ""; +} diff --git a/ui/v2.5/src/components/Changelog/versions/v010.md b/ui/v2.5/src/docs/en/Changelog/v010.md similarity index 100% rename from ui/v2.5/src/components/Changelog/versions/v010.md rename to ui/v2.5/src/docs/en/Changelog/v010.md diff --git a/ui/v2.5/src/components/Changelog/versions/v0100.md b/ui/v2.5/src/docs/en/Changelog/v0100.md similarity index 100% rename from ui/v2.5/src/components/Changelog/versions/v0100.md rename to ui/v2.5/src/docs/en/Changelog/v0100.md diff --git a/ui/v2.5/src/components/Changelog/versions/v011.md b/ui/v2.5/src/docs/en/Changelog/v011.md similarity index 100% rename from ui/v2.5/src/components/Changelog/versions/v011.md rename to ui/v2.5/src/docs/en/Changelog/v011.md diff --git a/ui/v2.5/src/components/Changelog/versions/v0110.md b/ui/v2.5/src/docs/en/Changelog/v0110.md similarity index 100% rename from ui/v2.5/src/components/Changelog/versions/v0110.md rename to ui/v2.5/src/docs/en/Changelog/v0110.md diff --git a/ui/v2.5/src/components/Changelog/versions/v0120.md b/ui/v2.5/src/docs/en/Changelog/v0120.md similarity index 100% rename from ui/v2.5/src/components/Changelog/versions/v0120.md rename to ui/v2.5/src/docs/en/Changelog/v0120.md diff --git a/ui/v2.5/src/components/Changelog/versions/v0130.md b/ui/v2.5/src/docs/en/Changelog/v0130.md similarity index 100% rename from ui/v2.5/src/components/Changelog/versions/v0130.md rename to ui/v2.5/src/docs/en/Changelog/v0130.md diff --git
a/ui/v2.5/src/components/Changelog/versions/v0131.md b/ui/v2.5/src/docs/en/Changelog/v0131.md similarity index 100% rename from ui/v2.5/src/components/Changelog/versions/v0131.md rename to ui/v2.5/src/docs/en/Changelog/v0131.md diff --git a/ui/v2.5/src/components/Changelog/versions/v0140.md b/ui/v2.5/src/docs/en/Changelog/v0140.md similarity index 100% rename from ui/v2.5/src/components/Changelog/versions/v0140.md rename to ui/v2.5/src/docs/en/Changelog/v0140.md diff --git a/ui/v2.5/src/components/Changelog/versions/v0150.md b/ui/v2.5/src/docs/en/Changelog/v0150.md similarity index 100% rename from ui/v2.5/src/components/Changelog/versions/v0150.md rename to ui/v2.5/src/docs/en/Changelog/v0150.md diff --git a/ui/v2.5/src/components/Changelog/versions/v0160.md b/ui/v2.5/src/docs/en/Changelog/v0160.md similarity index 100% rename from ui/v2.5/src/components/Changelog/versions/v0160.md rename to ui/v2.5/src/docs/en/Changelog/v0160.md diff --git a/ui/v2.5/src/components/Changelog/versions/v0161.md b/ui/v2.5/src/docs/en/Changelog/v0161.md similarity index 100% rename from ui/v2.5/src/components/Changelog/versions/v0161.md rename to ui/v2.5/src/docs/en/Changelog/v0161.md diff --git a/ui/v2.5/src/docs/en/Changelog/v0170.md b/ui/v2.5/src/docs/en/Changelog/v0170.md new file mode 100644 index 000000000..97befa26c --- /dev/null +++ b/ui/v2.5/src/docs/en/Changelog/v0170.md @@ -0,0 +1,15 @@ +After migrating, please run a scan on your entire library to populate missing data, and to ingest identical files which were previously ignored. + +### 💥 Known issues and other changes +* Missing covers are not currently regenerated. +* Import/export schema has changed and is incompatible with the previous version. + +### ✨ New Features +* Added support for identical files. Identical files are assigned to the same scene/gallery/image and can be viewed in File Info. ([#2676](https://github.com/stashapp/stash/pull/2676)) +* Added support for filtering and sorting by file count. ([#2744](https://github.com/stashapp/stash/pull/2744)) +* Added release notes dialog. ([#2726](https://github.com/stashapp/stash/pull/2726)) + +### 🎨 Improvements +* Object titles are now displayed as the file basename if the title is not explicitly set. The `Don't include file extension as part of the title` scan flag is no longer supported. +* `Set name, date, details from embedded file metadata` scan flag is no longer supported. This functionality may be implemented as a built-in scraper in the future. +* Moved Changelogs to Settings page. 
([#2726](https://github.com/stashapp/stash/pull/2726)) \ No newline at end of file diff --git a/ui/v2.5/src/components/Changelog/versions/v020.md b/ui/v2.5/src/docs/en/Changelog/v020.md similarity index 100% rename from ui/v2.5/src/components/Changelog/versions/v020.md rename to ui/v2.5/src/docs/en/Changelog/v020.md diff --git a/ui/v2.5/src/components/Changelog/versions/v021.md b/ui/v2.5/src/docs/en/Changelog/v021.md similarity index 100% rename from ui/v2.5/src/components/Changelog/versions/v021.md rename to ui/v2.5/src/docs/en/Changelog/v021.md diff --git a/ui/v2.5/src/components/Changelog/versions/v030.md b/ui/v2.5/src/docs/en/Changelog/v030.md similarity index 100% rename from ui/v2.5/src/components/Changelog/versions/v030.md rename to ui/v2.5/src/docs/en/Changelog/v030.md diff --git a/ui/v2.5/src/components/Changelog/versions/v040.md b/ui/v2.5/src/docs/en/Changelog/v040.md similarity index 100% rename from ui/v2.5/src/components/Changelog/versions/v040.md rename to ui/v2.5/src/docs/en/Changelog/v040.md diff --git a/ui/v2.5/src/components/Changelog/versions/v050.md b/ui/v2.5/src/docs/en/Changelog/v050.md similarity index 100% rename from ui/v2.5/src/components/Changelog/versions/v050.md rename to ui/v2.5/src/docs/en/Changelog/v050.md diff --git a/ui/v2.5/src/components/Changelog/versions/v060.md b/ui/v2.5/src/docs/en/Changelog/v060.md similarity index 100% rename from ui/v2.5/src/components/Changelog/versions/v060.md rename to ui/v2.5/src/docs/en/Changelog/v060.md diff --git a/ui/v2.5/src/components/Changelog/versions/v070.md b/ui/v2.5/src/docs/en/Changelog/v070.md similarity index 100% rename from ui/v2.5/src/components/Changelog/versions/v070.md rename to ui/v2.5/src/docs/en/Changelog/v070.md diff --git a/ui/v2.5/src/components/Changelog/versions/v080.md b/ui/v2.5/src/docs/en/Changelog/v080.md similarity index 100% rename from ui/v2.5/src/components/Changelog/versions/v080.md rename to ui/v2.5/src/docs/en/Changelog/v080.md diff --git a/ui/v2.5/src/components/Changelog/versions/v090.md b/ui/v2.5/src/docs/en/Changelog/v090.md similarity index 100% rename from ui/v2.5/src/components/Changelog/versions/v090.md rename to ui/v2.5/src/docs/en/Changelog/v090.md diff --git a/ui/v2.5/src/docs/en/AutoTagging.md b/ui/v2.5/src/docs/en/Manual/AutoTagging.md similarity index 100% rename from ui/v2.5/src/docs/en/AutoTagging.md rename to ui/v2.5/src/docs/en/Manual/AutoTagging.md diff --git a/ui/v2.5/src/docs/en/Browsing.md b/ui/v2.5/src/docs/en/Manual/Browsing.md similarity index 100% rename from ui/v2.5/src/docs/en/Browsing.md rename to ui/v2.5/src/docs/en/Manual/Browsing.md diff --git a/ui/v2.5/src/docs/en/Captions.md b/ui/v2.5/src/docs/en/Manual/Captions.md similarity index 100% rename from ui/v2.5/src/docs/en/Captions.md rename to ui/v2.5/src/docs/en/Manual/Captions.md diff --git a/ui/v2.5/src/docs/en/Configuration.md b/ui/v2.5/src/docs/en/Manual/Configuration.md similarity index 100% rename from ui/v2.5/src/docs/en/Configuration.md rename to ui/v2.5/src/docs/en/Manual/Configuration.md diff --git a/ui/v2.5/src/docs/en/Contributing.md b/ui/v2.5/src/docs/en/Manual/Contributing.md similarity index 100% rename from ui/v2.5/src/docs/en/Contributing.md rename to ui/v2.5/src/docs/en/Manual/Contributing.md diff --git a/ui/v2.5/src/docs/en/Deduplication.md b/ui/v2.5/src/docs/en/Manual/Deduplication.md similarity index 100% rename from ui/v2.5/src/docs/en/Deduplication.md rename to ui/v2.5/src/docs/en/Manual/Deduplication.md diff --git a/ui/v2.5/src/docs/en/EmbeddedPlugins.md 
b/ui/v2.5/src/docs/en/Manual/EmbeddedPlugins.md similarity index 100% rename from ui/v2.5/src/docs/en/EmbeddedPlugins.md rename to ui/v2.5/src/docs/en/Manual/EmbeddedPlugins.md diff --git a/ui/v2.5/src/docs/en/ExternalPlugins.md b/ui/v2.5/src/docs/en/Manual/ExternalPlugins.md similarity index 100% rename from ui/v2.5/src/docs/en/ExternalPlugins.md rename to ui/v2.5/src/docs/en/Manual/ExternalPlugins.md diff --git a/ui/v2.5/src/docs/en/Galleries.md b/ui/v2.5/src/docs/en/Manual/Galleries.md similarity index 100% rename from ui/v2.5/src/docs/en/Galleries.md rename to ui/v2.5/src/docs/en/Manual/Galleries.md diff --git a/ui/v2.5/src/docs/en/Help.md b/ui/v2.5/src/docs/en/Manual/Help.md similarity index 100% rename from ui/v2.5/src/docs/en/Help.md rename to ui/v2.5/src/docs/en/Manual/Help.md diff --git a/ui/v2.5/src/docs/en/Identify.md b/ui/v2.5/src/docs/en/Manual/Identify.md similarity index 100% rename from ui/v2.5/src/docs/en/Identify.md rename to ui/v2.5/src/docs/en/Manual/Identify.md diff --git a/ui/v2.5/src/docs/en/Interactive.md b/ui/v2.5/src/docs/en/Manual/Interactive.md similarity index 100% rename from ui/v2.5/src/docs/en/Interactive.md rename to ui/v2.5/src/docs/en/Manual/Interactive.md diff --git a/ui/v2.5/src/docs/en/Interface.md b/ui/v2.5/src/docs/en/Manual/Interface.md similarity index 100% rename from ui/v2.5/src/docs/en/Interface.md rename to ui/v2.5/src/docs/en/Manual/Interface.md diff --git a/ui/v2.5/src/docs/en/Introduction.md b/ui/v2.5/src/docs/en/Manual/Introduction.md similarity index 100% rename from ui/v2.5/src/docs/en/Introduction.md rename to ui/v2.5/src/docs/en/Manual/Introduction.md diff --git a/ui/v2.5/src/docs/en/JSONSpec.md b/ui/v2.5/src/docs/en/Manual/JSONSpec.md similarity index 69% rename from ui/v2.5/src/docs/en/JSONSpec.md rename to ui/v2.5/src/docs/en/Manual/JSONSpec.md index 9d65970fe..0749f7170 100644 --- a/ui/v2.5/src/docs/en/JSONSpec.md +++ b/ui/v2.5/src/docs/en/Manual/JSONSpec.md @@ -2,24 +2,37 @@ The metadata given to Stash can be exported into the JSON format. This structure can be modified, or replicated by other means. The resulting data can then be imported again, giving the possibility for automatic scraping of all kinds. The format of this metadata bulk is a folder structure, containing the following folders: -* `downloads` +* `files` * `galleries` +* `images` * `performers` * `scenes` * `studios` * `movies` - -Additionally, it contains a `mappings.json` file. - -The mappings file contains a reference to all files within the folders, by including their checksum. All files in the aforementioned folders are named by their checksum (like `967ddf2e028f10fc8d36901833c25732.json`), which (at least in the case of galleries and scenes) is generated from the file that this metadata relates to. The algorithm for the checksum is MD5. +# File naming + +When exported, files are named with different formats depending on the object type: + +| Type | Format | +|------|--------| +| Files/Folders | `<path depth>.<basename>.<hash>.json` | +| Galleries | `<title>.<hash>.json` or `<first zip file basename>.<hash>.json` or `<folder basename>.json` | +| Images | `<title or first file basename>.<hash>.json` | +| Performers | `<name>.json` | +| Scenes | `<title or first file basename>.<hash>.json` | +| Studios | `<name>.json` | +| Movies | `<name>.json` | + +Note that the file naming is not significant when importing. All json files will be read from the subdirectories. + # Content of the json files In the following, the values of the according jsons will be shown.
If the value should be a number, it is written with after comma values (like `29.98` or `50.0`), but still as a string. The meaning from most of them should be obvious due to the previous explanation or from the possible values stash offers when editing, otherwise a short comment will be added. The json values are given as strings, if not stated otherwise. Every new line will stand for a new value in the json. If the value is a list of objects, the values of that object will be shown indented. -If a value is empty in any but the `mappings.json` file, it can be left out of the file entirely. In the `mappings.json` however, all values must be present, if there are no objects of a type (for example, no performers), the value is simply null. +If a value is empty in any file, it can be left out of the file entirely. Many files have an `created_at` and `updated_at`, both are kept in the following format: ``` YYYY-MM-DDThh:mm:ssTZD @@ -29,22 +42,6 @@ Example: "created_at": "2019-05-03T21:36:58+01:00" ``` -## `mappings.json` -``` -performers - name - checksum -studios - name - checksum -galleries - path - checksum -scenes - path - checksum -``` - ## Performer ``` name @@ -112,100 +109,110 @@ created_at updated_at ``` -## Gallery -No files of this kind are generated yet. +## Image +``` +title +studio +rating (integer) +performers (list of strings, performers name) +tags (list of strings) +files (list of path strings) +galleries + zip_files (list of path strings) + folder_path + title (for user-created gallery) +created_at +updated_at +``` + +## Gallery +``` +title +studio +url +date +rating (integer) +details +performers (list of strings, performers name) +tags (list of strings) +zip_files (list of path strings) +folder_path +created_at +updated_at +``` + +## Files + +### Folder +``` +zip_file (path to containing zip file) +mod_time +type (= folder) +path +created_at +updated_at +``` + +### Video file +``` +zip_file (path to containing zip file) +mod_time +type (= video) +path +fingerprints + type + fingerprint +size +format +width +height +duration +video_codec +audio_codec +frame +bitrate +interactive (bool) +interactive_speed (integer) +created_at +updated_at +``` + +### Image file +``` +zip_file (path to containing zip file) +mod_time +type (= image) +path +fingerprints + type + fingerprint +size +format +width +height +created_at +updated_at +``` + +### Other files +``` +zip_file (path to containing zip file) +mod_time +type (= file) +path +fingerprints + type + fingerprint +size +created_at +updated_at +``` # In JSON format For those preferring the json-format, defined [here](https://json-schema.org/), the following format may be more interesting: -## mappings.json - -```json -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "$id": "https://github.com/stashapp/stash/wiki/JSON-Specification/mappings.json", - "title": "mappings", - "description": "The base file for the metadata. 
Referring to all other files with names, as well as providing the path to files.", - "type": "object", - "properties": { - "performers": { - "description": "Link to the performers files along with names", - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "checksum": { - "type": "string" - } - }, - "required": ["name", "checksum"] - }, - "minItems": 0, - "uniqueItems": true - }, - "studios": { - "description": "Link to the studio files along with names", - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "checksum": { - "type": "string" - } - }, - "required": ["name", "checksum"] - }, - "minItems": 0, - "uniqueItems": true - }, - "galleries": { - "description": "Link to the gallery files along with the path to the content", - "type": "array", - "items": { - "type": "object", - "properties": { - "path": { - "type": "string" - }, - "checksum": { - "type": "string" - } - }, - "required": ["path", "checksum"] - }, - "minItems": 0, - "uniqueItems": true - }, - "scenes": { - "description": "Link to the scene files along with the path to the content", - "type": "array", - "items": { - "type": "object", - "properties": { - "path": { - "type": "string" - }, - "checksum": { - "type": "string" - } - }, - "required": ["path", "checksum"] - }, - "minItems": 0, - "uniqueItems": true - } - }, - "required": ["performers", "studios", "galleries", "scenes"] -} -``` ## performer.json ``` json @@ -439,45 +446,14 @@ For those preferring the json-format, defined [here](https://json-schema.org/), "minItems": 1, "uniqueItems": true }, - "file": { - "description": "Some technical data about the scenes file.", - "type": "object", - "properties": { - "size": { - "description": "The size of the file in bytes", - "type": "string" - }, - "duration": { - "description": "Duration of the scene in seconds. It is given with after comma values, such as 10.0 or 17.5", - "type": "string" - }, - "video_codec": { - "description": "The coding of the video part of the scene file. An example would be h264", - "type": "string" - }, - "audio_codec": { - "description": "The coding of the audio part of the scene file. An example would be aac", - "type": "string" - }, - "width": { - "description": "The width of the scene in pixels", - "type": "integer" - }, - "height": { - "description": "The height of the scene in pixels", - "type": "integer" - }, - "framerate": { - "description": "Framerate of the scene. It is given with after comma values, such as 29.95", - "type": "string" - }, - "bitrate": { - "description": "The bitrate of the video, in bits", - "type": "integer" - } - + "files": { + "description": "A list of paths of the files for this scene", + "type": "array", + "items": { + "type": "string" }, - "required": ["size", "duration", "video_codec", "audio_codec", "height", "width", "framerate", "bitrate"] + "minItems": 1, + "uniqueItems": true }, "created_at": { "description": "The time this studios data was added to the database. 
Format is YYYY-MM-DDThh:mm:ssTZD", @@ -491,7 +467,3 @@ For those preferring the json-format, defined [here](https://json-schema.org/), "required": ["files", "created_at", "updated_at"] } ``` - -## Gallery - -No files of this kind are created here yet diff --git a/ui/v2.5/src/docs/en/KeyboardShortcuts.md b/ui/v2.5/src/docs/en/Manual/KeyboardShortcuts.md similarity index 100% rename from ui/v2.5/src/docs/en/KeyboardShortcuts.md rename to ui/v2.5/src/docs/en/Manual/KeyboardShortcuts.md diff --git a/ui/v2.5/src/docs/en/Plugins.md b/ui/v2.5/src/docs/en/Manual/Plugins.md similarity index 100% rename from ui/v2.5/src/docs/en/Plugins.md rename to ui/v2.5/src/docs/en/Manual/Plugins.md diff --git a/ui/v2.5/src/docs/en/SceneFilenameParser.md b/ui/v2.5/src/docs/en/Manual/SceneFilenameParser.md similarity index 100% rename from ui/v2.5/src/docs/en/SceneFilenameParser.md rename to ui/v2.5/src/docs/en/Manual/SceneFilenameParser.md diff --git a/ui/v2.5/src/docs/en/ScraperDevelopment.md b/ui/v2.5/src/docs/en/Manual/ScraperDevelopment.md similarity index 100% rename from ui/v2.5/src/docs/en/ScraperDevelopment.md rename to ui/v2.5/src/docs/en/Manual/ScraperDevelopment.md diff --git a/ui/v2.5/src/docs/en/Scraping.md b/ui/v2.5/src/docs/en/Manual/Scraping.md similarity index 100% rename from ui/v2.5/src/docs/en/Scraping.md rename to ui/v2.5/src/docs/en/Manual/Scraping.md diff --git a/ui/v2.5/src/docs/en/Tagger.md b/ui/v2.5/src/docs/en/Manual/Tagger.md similarity index 100% rename from ui/v2.5/src/docs/en/Tagger.md rename to ui/v2.5/src/docs/en/Manual/Tagger.md diff --git a/ui/v2.5/src/docs/en/Tasks.md b/ui/v2.5/src/docs/en/Manual/Tasks.md similarity index 100% rename from ui/v2.5/src/docs/en/Tasks.md rename to ui/v2.5/src/docs/en/Manual/Tasks.md diff --git a/ui/v2.5/src/docs/en/MigrationNotes/32.md b/ui/v2.5/src/docs/en/MigrationNotes/32.md new file mode 100644 index 000000000..c19401aea --- /dev/null +++ b/ui/v2.5/src/docs/en/MigrationNotes/32.md @@ -0,0 +1,5 @@ +**For best results, ensure that zip-based gallery paths are correct by performing a scan and clean of your library using v0.16.1 prior to running this migration.** + +This migration significantly changes the way that stash stores information about your files. This migration is not reversible. + +After migrating, please run a scan on your entire library to populate missing data, and to ingest identical files which were previously ignored. 
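The release-notes dialog follows a similar pattern to the migration notes: the `lastNoteSeen` field added to `IUIConfig` earlier in this diff records a `YYYYMMDD` handle, and the dated entries live in `ReleaseNotes/index.ts` below. A hedged sketch of the filtering this enables, with `unseenReleaseNotes` as a hypothetical name (the dialog's actual wiring is not shown in this diff):

```ts
import { releaseNotes } from "src/docs/en/ReleaseNotes";

// Hypothetical helper: return only the notes dated after the last one the
// user has seen; show everything on a fresh config.
function unseenReleaseNotes(lastNoteSeen?: number) {
  if (lastNoteSeen === undefined) return releaseNotes;
  return releaseNotes.filter((note) => note.date > lastNoteSeen);
}
```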
diff --git a/ui/v2.5/src/docs/en/MigrationNotes/index.ts b/ui/v2.5/src/docs/en/MigrationNotes/index.ts new file mode 100644 index 000000000..df11b7bf2 --- /dev/null +++ b/ui/v2.5/src/docs/en/MigrationNotes/index.ts @@ -0,0 +1,7 @@ +import migration32 from "./32.md"; + +type Module = typeof migration32; + +export const migrationNotes: Record<number, Module> = { + 32: migration32, +}; diff --git a/ui/v2.5/src/docs/en/ReleaseNotes/index.ts b/ui/v2.5/src/docs/en/ReleaseNotes/index.ts new file mode 100644 index 000000000..8a1e5000e --- /dev/null +++ b/ui/v2.5/src/docs/en/ReleaseNotes/index.ts @@ -0,0 +1,16 @@ +import v0170 from "./v0170.md"; + +export type Module = typeof v0170; + +interface IReleaseNotes { + // date is a handle in the form YYYYMMDD + date: number; + content: Module; +} + +export const releaseNotes: IReleaseNotes[] = [ + { + date: 20220906, + content: v0170, + }, +]; diff --git a/ui/v2.5/src/docs/en/ReleaseNotes/v0170.md b/ui/v2.5/src/docs/en/ReleaseNotes/v0170.md new file mode 100644 index 000000000..773f94da4 --- /dev/null +++ b/ui/v2.5/src/docs/en/ReleaseNotes/v0170.md @@ -0,0 +1,10 @@ +After migrating, please run a scan on your entire library to populate missing data, and to ingest identical files which were previously ignored. + +### 💥 Known issues +* Missing covers are not currently regenerated. This needs further consideration, especially around the scene cover redesign. + +### Other changes +* Import/export schema has changed and is incompatible with the previous version. +* Changelog has been moved from the stats page to a section in the Settings page. +* Object titles are now displayed as the file basename if the title is not explicitly set. The `Don't include file extension as part of the title` scan flag is no longer supported. +* `Set name, date, details from embedded file metadata` scan flag is no longer supported. This functionality may be implemented as a built-in scraper in the future.
\ No newline at end of file diff --git a/ui/v2.5/src/index.scss b/ui/v2.5/src/index.scss index 1e122c07c..4735aa70b 100755 --- a/ui/v2.5/src/index.scss +++ b/ui/v2.5/src/index.scss @@ -14,6 +14,7 @@ @import "src/components/SceneFilenameParser/styles.scss"; @import "src/components/ScenePlayer/styles.scss"; @import "src/components/Settings/styles.scss"; +@import "src/components/Setup/styles.scss"; @import "src/components/Studios/styles.scss"; @import "src/components/Shared/styles.scss"; @import "src/components/Tags/styles.scss"; diff --git a/ui/v2.5/src/locales/en-GB.json b/ui/v2.5/src/locales/en-GB.json index f3faa50cb..a8d841fd9 100644 --- a/ui/v2.5/src/locales/en-GB.json +++ b/ui/v2.5/src/locales/en-GB.json @@ -189,6 +189,7 @@ }, "categories": { "about": "About", + "changelog": "Changelog", "interface": "Interface", "logs": "Logs", "metadata_providers": "Metadata Providers", @@ -604,6 +605,7 @@ "edit_entity_title": "Edit {count, plural, one {{singularEntity}} other {{pluralEntity}}}", "export_include_related_objects": "Include related objects in export", "export_title": "Export", + "dont_show_until_updated": "Don't show until next update", "lightbox": { "delay": "Delay (Sec)", "display_mode": { @@ -726,6 +728,7 @@ "false": "False", "favourite": "Favourite", "file": "file", + "file_count": "File Count", "file_info": "File Info", "file_mod_time": "File Modification Time", "files": "files", @@ -733,6 +736,7 @@ "filter": "Filter", "filter_name": "Filter name", "filters": "Filters", + "folder": "Folder", "framerate": "Frame Rate", "frames_per_second": "{value} frames per second", "front_page": { @@ -870,6 +874,7 @@ "rating": "Rating", "recently_added_objects": "Recently Added {objects}", "recently_released_objects": "Recently Released {objects}", + "release_notes": "Release Notes", "resolution": "Resolution", "scene": "Scene", "sceneTagger": "Scene Tagger", @@ -919,9 +924,10 @@ "migration_failed_error": "The following error was encountered while migrating the database:", "migration_failed_help": "Please make any necessary corrections and try again. Otherwise, raise a bug on the {githubLink} or seek help in the {discordLink}.", "migration_irreversible_warning": "The schema migration process is not reversible. Once the migration is performed, your database will be incompatible with previous versions of stash.", + "migration_notes": "Migration Notes", "migration_required": "Migration required", "perform_schema_migration": "Perform schema migration", - "schema_too_old": "Your current stash database is schema version <strong>{databaseSchema}</strong> and needs to be migrated to version <strong>{appSchema}</strong>. This version of Stash will not function without migrating the database." + "schema_too_old": "Your current stash database is schema version <strong>{databaseSchema}</strong> and needs to be migrated to version <strong>{appSchema}</strong>. This version of Stash will not function without migrating the database. If you do not wish to migrate, you will need to downgrade to a version that matches your database schema." 
}, "paths": { "database_filename_empty_for_default": "database filename (empty for default)", @@ -1019,5 +1025,6 @@ "videos": "Videos", "view_all": "View All", "weight": "Weight", - "years_old": "years old" + "years_old": "years old", + "zip_file_count": "Zip File Count" } diff --git a/ui/v2.5/src/models/list-filter/criteria/criterion.ts b/ui/v2.5/src/models/list-filter/criteria/criterion.ts index 828b5e1c8..23ef0ba5c 100644 --- a/ui/v2.5/src/models/list-filter/criteria/criterion.ts +++ b/ui/v2.5/src/models/list-filter/criteria/criterion.ts @@ -497,8 +497,11 @@ export class MandatoryNumberCriterionOption extends CriterionOption { } } -export function createMandatoryNumberCriterionOption(value: CriterionType) { - return new MandatoryNumberCriterionOption(value, value, value); +export function createMandatoryNumberCriterionOption( + value: CriterionType, + messageID?: string +) { + return new MandatoryNumberCriterionOption(messageID ?? value, value, value); } export class DurationCriterion extends Criterion<INumberValue> { diff --git a/ui/v2.5/src/models/list-filter/criteria/factory.ts b/ui/v2.5/src/models/list-filter/criteria/factory.ts index 28b330569..55d3a3991 100644 --- a/ui/v2.5/src/models/list-filter/criteria/factory.ts +++ b/ui/v2.5/src/models/list-filter/criteria/factory.ts @@ -75,6 +75,7 @@ export function makeCriteria(type: CriterionType = "none") { case "performer_count": case "performer_age": case "tag_count": + case "file_count": return new NumberCriterion( new MandatoryNumberCriterionOption(type, type) ); diff --git a/ui/v2.5/src/models/list-filter/galleries.ts b/ui/v2.5/src/models/list-filter/galleries.ts index 334c2685d..d3b3cd332 100644 --- a/ui/v2.5/src/models/list-filter/galleries.ts +++ b/ui/v2.5/src/models/list-filter/galleries.ts @@ -25,6 +25,10 @@ const sortByOptions = ["date", ...MediaSortByOptions] messageID: "image_count", value: "images_count", }, + { + messageID: "zip_file_count", + value: "file_count", + }, ]); const displayModeOptions = [ @@ -56,6 +60,7 @@ const criterionOptions = [ createStringCriterionOption("image_count"), StudiosCriterionOption, createStringCriterionOption("url"), + createMandatoryNumberCriterionOption("file_count", "zip_file_count"), ]; export const GalleryListFilterOptions = new ListFilterOptions( diff --git a/ui/v2.5/src/models/list-filter/images.ts b/ui/v2.5/src/models/list-filter/images.ts index 0f675cd4c..5102e72c5 100644 --- a/ui/v2.5/src/models/list-filter/images.ts +++ b/ui/v2.5/src/models/list-filter/images.ts @@ -19,9 +19,12 @@ import { DisplayMode } from "./types"; const defaultSortBy = "path"; -const sortByOptions = ["o_counter", "filesize", ...MediaSortByOptions].map( - ListFilterOptions.createSortBy -); +const sortByOptions = [ + "o_counter", + "filesize", + "file_count", + ...MediaSortByOptions, +].map(ListFilterOptions.createSortBy); const displayModeOptions = [DisplayMode.Grid, DisplayMode.Wall]; const criterionOptions = [ @@ -41,6 +44,7 @@ const criterionOptions = [ createMandatoryNumberCriterionOption("performer_age"), PerformerFavoriteCriterionOption, StudiosCriterionOption, + createMandatoryNumberCriterionOption("file_count"), ]; export const ImageListFilterOptions = new ListFilterOptions( defaultSortBy, diff --git a/ui/v2.5/src/models/list-filter/scenes.ts b/ui/v2.5/src/models/list-filter/scenes.ts index c53c7ce21..485f31e8c 100644 --- a/ui/v2.5/src/models/list-filter/scenes.ts +++ b/ui/v2.5/src/models/list-filter/scenes.ts @@ -30,6 +30,7 @@ const sortByOptions = [ "organized", "o_counter", "date", + "file_count", 
"filesize", "duration", "framerate", @@ -81,6 +82,7 @@ const criterionOptions = [ InteractiveCriterionOption, CaptionsCriterionOption, createMandatoryNumberCriterionOption("interactive_speed"), + createMandatoryNumberCriterionOption("file_count"), ]; export const SceneListFilterOptions = new ListFilterOptions( diff --git a/ui/v2.5/src/models/list-filter/types.ts b/ui/v2.5/src/models/list-filter/types.ts index 5194c04e6..f97a48446 100644 --- a/ui/v2.5/src/models/list-filter/types.ts +++ b/ui/v2.5/src/models/list-filter/types.ts @@ -133,4 +133,5 @@ export type CriterionType = | "performer_favorite" | "performer_age" | "duplicated" - | "ignore_auto_tag"; + | "ignore_auto_tag" + | "file_count"; diff --git a/ui/v2.5/src/models/sceneQueue.ts b/ui/v2.5/src/models/sceneQueue.ts index 3c558493a..0eb4ac6fd 100644 --- a/ui/v2.5/src/models/sceneQueue.ts +++ b/ui/v2.5/src/models/sceneQueue.ts @@ -1,9 +1,11 @@ import queryString from "query-string"; import { RouteComponentProps } from "react-router-dom"; -import { FilterMode } from "src/core/generated-graphql"; +import { FilterMode, Scene } from "src/core/generated-graphql"; import { ListFilterModel } from "./list-filter/filter"; import { SceneListFilterOptions } from "./list-filter/scenes"; +export type QueuedScene = Pick<Scene, "id" | "title" | "paths">; + interface IQueryParameters { qsort?: string; qsortd?: string; diff --git a/vendor/github.com/doug-martin/goqu/v9/.gitignore b/vendor/github.com/doug-martin/goqu/v9/.gitignore new file mode 100644 index 000000000..d884bf352 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/.gitignore @@ -0,0 +1,4 @@ +.idea +src +*.iml +coverage.* \ No newline at end of file diff --git a/vendor/github.com/doug-martin/goqu/v9/.golangci.yml b/vendor/github.com/doug-martin/goqu/v9/.golangci.yml new file mode 100644 index 000000000..01a93ea36 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/.golangci.yml @@ -0,0 +1,140 @@ +linters-settings: + funlen: + lines: 140 + statements: 50 + gci: + local-prefixes: github.com/golangci/golangci-lint + goconst: + min-len: 2 + min-occurrences: 2 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - ifElseChain + - octalLiteral + - whyNoLint + - wrapperFunc + - sqlQuery # used by tests + gocyclo: + min-complexity: 20 + goimports: + local-prefixes: github.com/golangci/golangci-lint + golint: + min-confidence: 0 + gomnd: + settings: + mnd: + # don't include the "operation" and "assign" + checks: argument,case,condition,return + govet: + check-shadowing: true + settings: + printf: + funcs: + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf + lll: + line-length: 140 + maligned: + suggest-new: true + misspell: + locale: US + nolintlint: + allow-leading-space: true # don't require machine-readable nolint directives (i.e. with no leading space) + allow-unused: false # report any unused nolint directives + require-explanation: false # don't require an explanation for nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped + exhaustive: + default-signifies-exhaustive: true + +linters: + # please, do not use `enable-all`: it's deprecated and will be removed soon. 
+ # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint + disable-all: true + enable: + - asciicheck + - bodyclose + - deadcode + - depguard + - dogsled + - errcheck + - exportloopref + - exhaustive + - errcheck + - errorlint + - funlen + - forbidigo + - forcetypeassert + - goconst + - gocritic + - gocyclo + - gofmt + - goimports + - golint + - gomnd + - goprintffuncname + - gosec + - gosimple + - govet + - ifshort + - ineffassign + - lll + - makezero + - misspell + - nakedret + - nilerr + - noctx + - nolintlint + - prealloc + - predeclared + - rowserrcheck + - staticcheck + - structcheck + - stylecheck + - testpackage + - typecheck + - unconvert + - unparam + - unused + - varcheck + - whitespace + + # don't enable: + # - gochecknoglobals + # - gocognit + # - godot + # - godox + # - goerr113 + # - interfacer + # - maligned + # - nestif + # - revive + # - wsl + # - wrapcheck + +issues: + # Excluding configuration per-path, per-linter, per-text and per-source + exclude-rules: + - path: _test\.go + linters: + - gomnd + + # https://github.com/go-critic/go-critic/issues/926 + - linters: + - gocritic + text: "unnecessaryDefer:" + +run: + skip-dirs: + - test/testdata_etc + - internal/cache + - internal/renameio + - internal/robustio \ No newline at end of file diff --git a/vendor/github.com/doug-martin/goqu/v9/CODE_OF_CONDUCT.md b/vendor/github.com/doug-martin/goqu/v9/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..fbab7764d --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 
+ +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at doug@dougamartin.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/doug-martin/goqu/v9/CONTRIBUTING.md b/vendor/github.com/doug-martin/goqu/v9/CONTRIBUTING.md new file mode 100644 index 000000000..659e0f27d --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/CONTRIBUTING.md @@ -0,0 +1,31 @@ +## Contributions + +I am always welcoming contributions of any type. Please open an issue or create a PR if you find an issue with any of the following. + +* An issue with Documentation +* You found the documentation lacking in some way + +If you have an issue with the package please include the following + +* The dialect you are using +* A description of the problem +* A short example of how to reproduce (if applicable) + +Without those basics it can be difficult to reproduce your issue locally. You may be asked for more information but that is a good starting point. + +### New Features + +New features and/or enhancements are great and I encourage you to either submit a PR or create an issue. In both cases include the following as the need/requirement may not be readily apparent. + +1. The use case +2. A short example + +If you are issuing a PR also also include the following + +1. Tests - otherwise the PR will not be merged +2. Documentation - otherwise the PR will not be merged +3. Examples - [If applicable] see example_test.go for examples + +If you find an issue you want to work on please comment on it letting other people know you are looking at it and I will assign the issue to you. + +If want to work on an issue but dont know where to start just leave a comment and I'll be more than happy to point you in the right direction. 
diff --git a/vendor/github.com/doug-martin/goqu/v9/HISTORY.md b/vendor/github.com/doug-martin/goqu/v9/HISTORY.md new file mode 100644 index 000000000..04e0ce169 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/HISTORY.md @@ -0,0 +1,404 @@ +# v9.18.0 +* [FEATURE] Add support for aliasing insert datasets to support upsert alias [#306](https://github.com/doug-martin/goqu/pull/306) - [@XIELongDragon](https://github.com/XIELongDragon) +* [FEATURE] Add support for aliasing BooleanExpressions [#307](https://github.com/doug-martin/goqu/pull/307) - [@XIELongDragon](https://github.com/XIELongDragon) + +# v9.17.0 +* [FEATURE] Add support bitwise operations [#303](https://github.com/doug-martin/goqu/pull/303) - [@XIELongDragon](https://github.com/XIELongDragon) +* [FEATURE] Add support for specifying tables to be locked in ForUpdate, ForNoKeyUpdate, ForKeyShare, ForShare [#299](https://github.com/doug-martin/goqu/pull/299) - [@jbub](https://github.com/jbub) + +# v9.16.0 +* [FEATURE] Allow ordering by case expression [#282](https://github.com/doug-martin/goqu/issues/282), [#292](https://github.com/doug-martin/goqu/pull/292) + +# v9.15.1 +* [FIX] Field indexes in the columnMap getting overridden [290](https://github.com/doug-martin/goqu/issues/290), [#291](https://github.com/doug-martin/goqu/pull/291) + +# v9.15.0 +* [FEATURE] Add SetDefaultPrepared which controls query value interpolation [#288](https://github.com/doug-martin/goqu/pull/288) - [@Deiz](https://github.com/Deiz) + +# v9.14.0 +* [FEATURE] GroupByAppend to the SelectDataset and SelectClauses [#276](https://github.com/doug-martin/goqu/pull/276), [#287](https://github.com/doug-martin/goqu/pull/287) - [@ashishkf](https://github.com/ashishkf) +* [FEATURE] Allow untagged fields to be ignored [#285](https://github.com/doug-martin/goqu/pull/285) - [@Deiz](https://github.com/Deiz) +* [FIX] Nil valuer [#277](https://github.com/doug-martin/goqu/pull/277) - [@benzolium](https://github.com/benzolium), [@Diggs](https://github.com/Diggs) +* [FIX] Fix old import URL in doc comments [#286](https://github.com/doug-martin/goqu/pull/286) - [@maito1201](https://github.com/maito1201) + +# v9.13.0 + +* [ADDED] ScanStructs, ScanVals to Scanner interface [#273](https://github.com/doug-martin/goqu/issues/273) - [@vlanse](https://github.com/vlanse) + +# v9.12.0 + +* Update golangci-lint and updates for all associated linters +* Update dependencies + * github.com/DATA-DOG/go-sqlmock v1.3.3 -> v1.5.0 + * github.com/denisenkom/go-mssqldb v0.0.0-20200206145737-bbfc9a55622e -> v0.10.0 + * github.com/go-sql-driver/mysql v1.4.1 -> v1.6.0 + * github.com/lib/pq v1.2.0 -> v1.10.1 + * github.com/mattn/go-sqlite3 v1.11.0 -> v1.14.7 + * github.com/stretchr/testify -> v1.4.0 -> v1.7.0 + + +# v9.11.1 + +* [FIXED] Avoid mutation of join slice for separate datasets when joins slice capacity is not yet reached [#261](https://github.com/doug-martin/goqu/pull/261) - [@fhaifler](https://github.com/fhaifler) + +# v9.11.0 + +* [FIXED] Use valid 'IS' operator for sqlserver dialect [#240](https://github.com/doug-martin/goqu/pull/240), [#239](https://github.com/doug-martin/goqu/pull/229) - [@vlanse](https://github.com/vlanse) +* [ADDED] Implement Orderable interface for SQL Functions [#251](https://github.com/doug-martin/goqu/pull/251) - [@GlebBeloded](https://github.com/GlebBeloded) +* [ADDED] Support for table hint in multi-table MySQL DELETE queries [#252](https://github.com/doug-martin/goqu/pull/252) - [@vlanse](https://github.com/vlanse) + +# v9.10.0 + +* [FIXED] SELECT inherits 
dialect from INSERT in INSERT FROM SELECT. [#229](https://github.com/doug-martin/goqu/pull/229), [#223](https://github.com/doug-martin/goqu/issues/223) - [@vlanse](https://github.com/vlanse) +* [FIXED] SQLServer dialect: support prepared statements with TOP. [#230](https://github.com/doug-martin/goqu/pull/230), [#225](https://github.com/doug-martin/goqu/issues/225) - [@vlanse](https://github.com/vlanse) +* [ADDED] IsPrepared to SQLExpression interface. [#231](https://github.com/doug-martin/goqu/pull/231) - [@vlanse](https://github.com/vlanse) + +# v9.9.0 + +* [FIXED] SQLite do not add FOR UPDATE in SELECT. [#218](https://github.com/doug-martin/goqu/pull/218) - [@vlanse](https://github.com/vlanse) +* [ADDED] Support for INSERT ON CONFLICT in SQLite. [#218](https://github.com/doug-martin/goqu/pull/218) - [@vlanse](https://github.com/vlanse) + +# v9.8.0 + +* [ADDED] Support for ANY and ALL operators. [#196](https://github.com/doug-martin/goqu/issues/196) +* [ADDED] Support for CASE statements [#193](https://github.com/doug-martin/goqu/issues/193) +* [ADDED] Support for getting column identifiers from AliasExpressions. [#203](https://github.com/doug-martin/goqu/issues/203) + +# v9.7.1 + +* Fix all formatting for golangci-lint +* Move to golangci-lint github action + +# v9.7.0 + +* [ADDED] Support for sqlserver dialect [#197](https://github.com/doug-martin/goqu/issues/197),[#205](https://github.com/doug-martin/goqu/issues/205) - [@vlanse](https://github.com/vlanse) + +# v9.6.0 + +* [ADDED] Support for Lateral queries [#182](https://github.com/doug-martin/goqu/issues/182) + +# v9.5.1 + +* [FIXED] WITH clause without a RETURNING clause will panic [#177](https://github.com/doug-martin/goqu/issues/177) +* [FIXED] SQlite dialect escapes single quotes wrong, leads to SQL syntax error [#178](https://github.com/doug-martin/goqu/issues/178) +* [FIXED] Fix ReturnsColumns() nil pointer panic [#181](https://github.com/doug-martin/goqu/issues/181) - [@yeaha](https://github.com/yeaha) +* [FIXED] SelectDataset From with Error [#183](https://github.com/doug-martin/goqu/issues/183) +* [FIXED] Unable to execute union with order by expression [#185](https://github.com/doug-martin/goqu/issues/185) + +# v9.5.0 + +* [ADDED] Ability to use regexp like, ilike, notlike, and notilike without a regexp [#172](https://github.com/doug-martin/goqu/issues/172) + +# v9.4.0 + +* [ADDED] Ability to scan into struct fields from multiple tables [#160](https://github.com/doug-martin/goqu/issues/160) + +# v9.3.0 + +* [ADDED] Using Update, Insert, or Delete datasets in sub selects and CTEs [#164](https://github.com/doug-martin/goqu/issues/164) + +# v9.2.0 + +* [ADDED] exec.Scanner: New exposed scanner supports iterative scanning [#157](https://github.com/doug-martin/goqu/pull/157) - [@akarl](https://github.com/akarl) + +# v9.1.0 + +* [FIXED] ExampleDoUpdate does't work in postgres [#156](https://github.com/doug-martin/goqu/issues/156) +* [FIXED] Issue with timezone being lost [#163](https://github.com/doug-martin/goqu/issues/163) + +# v9.0.1 + +* [FIXED] Issue where `NULL`, `TRUE` and `FALSE` are interpolated when using an `IS` clause. [#165](https://github.com/doug-martin/goqu/issues/165) + +# v9.0.0 + +* Changed `NULL`, `TRUE`, `FALSE` to not be interpolated when creating prepared statements. 
[#132](https://github.com/doug-martin/goqu/pull/132), [#158](https://github.com/doug-martin/goqu/pull/158) - [@marshallmcmullen](https://github.com/marshallmcmullen) +* Updated dependencies + * `github.com/lib/pq v1.1.1 -> v1.2.0` + * `github.com/mattn/go-sqlite3 v1.10.0 -> v1.11.0` + * `github.com/stretchr/testify v1.3.0 -> v1.4.0` + +## v8.6.0 + +* [ADDED] `SetError()` and `Error()` to all datasets. [#152](https://github.com/doug-martin/goqu/pull/152) and [#150](https://github.com/doug-martin/goqu/pull/150) - [@marshallmcmullen](https://github.com/marshallmcmullen) + +## v8.5.0 + +* [ADDED] Window Function support [#128](https://github.com/doug-martin/goqu/issues/128) - [@Xuyuanp](https://github.com/Xuyuanp) + +## v8.4.1 + +* [FIXED] Returning func be able to handle nil [#140](https://github.com/doug-martin/goqu/issues/140) + +## v8.4.0 + +* Created new `sqlgen` module to encapsulate sql generation + * Broke SQLDialect inti new SQL generators for each statement type. +* Test refactor + * Moved to a test case pattern to allow for quickly adding new test cases. + +## v8.3.2 + +* [FIXED] Data race during query factory initialization [#133](https://github.com/doug-martin/goqu/issues/133) and [#136](https://github.com/doug-martin/goqu/issues/136) - [@o1egl](https://github.com/o1egl) + +## v8.3.1 + +* [FIXED] InsertDataset.WithDialect return old dataset [#126](https://github.com/doug-martin/goqu/issues/126) - [@chen56](https://github.com/chen56) +* Test clean up and more testing pattern consistency + * Changed to use assertion methods off of suite + * Updated Equals assertions to have expected output first +* Increase overall test coverage. + +## v8.3.0 + +* [Added] Support for `DISTINCT ON` clauses [#119](https://github.com/doug-martin/goqu/issues/119) + +## v8.2.2 + +* [FIX] Scanner errors on pointers to primitive values [#122](https://github.com/doug-martin/goqu/issues/122) + +## v8.2.1 + +* [FIX] Return an error when an empty identifier is encountered [#115](https://github.com/doug-martin/goqu/issues/115) + +## v8.2.0 + +* [FIX] Fix reflection errors related to nil pointers and unexported fields [#118](https://github.com/doug-martin/goqu/issues/118) + * Unexported fields are ignored when creating a columnMap + * Nil embedded pointers will no longer cause a panic + * Fields on nil embedded pointers will be ignored when creating update or insert statements. +* [ADDED] You can now ingore embedded structs and their fields by using `db:"-"` tag on the embedded struct. + +## v8.1.0 + +* [ADDED] Support column DEFAULT when inserting/updating via struct [#27](https://github.com/doug-martin/goqu/issues/27) + +## v8.0.1 + +* [ADDED] Multi table update support for `mysql` and `postgres` [#60](https://github.com/doug-martin/goqu/issues/60) +* [ADDED] `goqu.V` so values can be used on the LHS of expressions [#104](https://github.com/doug-martin/goqu/issues/104) + +## v8.0.0 + +A major change the the API was made in `v8` to seperate concerns between the different SQL statement types. + +**Why the change?** + +1. There were feature requests that could not be cleanly implemented with everything in a single dataset. +2. Too much functionality was encapsulated in a single datastructure. + * It was unclear what methods could be used for each SQL statement type. + * Changing a feature for one statement type had the possiblity of breaking another statement type. 
+ * Test coverage was decent but was almost solely concerned about SELECT statements, breaking them up allowed for focused testing on each statement type. + * Most the SQL generation methods (`ToInsertSQL`, `ToUpdateSQL` etc.) took arguments which lead to an ugly API that was not uniform for each statement type, and proved to be inflexible. + +**What Changed** + +There are now five dataset types, `SelectDataset`, `InsertDataset`, `UpdateDataset`, `DeleteDataset` and `TruncateDataset` + +Each dataset type has its own entry point. + +* `goqu.From`, `Database#From`, `DialectWrapper#From` - Create SELECT +* `goqu.Insert`, `Database#Insert`, `DialectWrapper#Insert` - Create INSERT +* `goqu.Update`, `Database#db.Update`, `DialectWrapper#Update` - Create UPDATE +* `goqu.Delete`, `Database#Delete`, `DialectWrapper#Delete` - Create DELETE +* `goqu.Truncate`, `Database#Truncate`, `DialectWrapper#Truncate` - Create TRUNCATE + +`ToInsertSQL`, `ToUpdateSQL`, `ToDeleteSQL`, and `ToTruncateSQL` (and variations of them) methods have been removed from the `SelectDataset`. Instead use the `ToSQL` methods on each dataset type. + +Each dataset type will have an `Executor` and `ToSQL` method so a common interface can be created for each type. + + +## v7.4.0 + +* [FIXED] literalTime use t.UTC() , This behavior is different from the original sql.DB [#106](https://github.com/doug-martin/goqu/issues/106) - [chen56](https://github.com/chen56) +* [ADDED] Add new method WithTx for Database [#108](https://github.com/doug-martin/goqu/issues/108) - [Xuyuanp](https://github.com/Xuyuanp) + +## v7.3.1 + +* [ADDED] Exposed `goqu.NewTx` to allow creating a goqu tx directly from a `sql.Tx` instead of using `goqu.Database#Begin` [#95](https://github.com/doug-martin/goqu/issues/95) +* [ADDED] `goqu.Database.BeginTx` [#98](https://github.com/doug-martin/goqu/issues/98) + +## v7.3.0 + +* [ADDED] UPDATE and INSERT should use struct Field name if db tag is not specified [#57](https://github.com/doug-martin/goqu/issues/57) +* [CHANGE] Changed goqu.Database to accept a SQLDatabase interface to allow using goqu.Database with other libraries such as `sqlx` [#95](https://github.com/doug-martin/goqu/issues/95) + +## v7.2.0 + +* [FIXED] Sqlite3 does not accept SELECT * UNION (SELECT *) [#79](https://github.com/doug-martin/goqu/issues/79) +* [FIXED] Where(Ex{}) causes panics [mysql] [#49](https://github.com/doug-martin/goqu/issues/49) +* [ADDED] Support for OrderPrepend [#61](https://github.com/doug-martin/goqu/issues/61) +* [DOCS] Added new section about loading a dialect and using it to build SQL [#44](https://github.com/doug-martin/goqu/issues/44) + +## v7.1.0 + +* [FIXED] Embedded pointers with property names that duplicate parent struct properties. [#23](https://github.com/doug-martin/goqu/issues/23) +* [FIXED] Can't scan values using []byte or []string [#90](https://github.com/doug-martin/goqu/issues/90) + * When a slice that is `*sql.RawBytes`, `*[]byte` or `sql.Scanner` no errors will be returned. + +## v7.0.1 + +* Fix issue where structs with pointer fields where not set properly [#86](https://github.com/doug-martin/goqu/pull/86) and [#89](https://github.com/doug-martin/goqu/pull/89) - [@efureev](https://github.com/efureev) + +## v7.0.0 + +**Linting** +* Add linting checks and fixed errors + * Renamed all snake_case variables to be camelCase. + * Fixed examples to always map to a defined method +* Renamed `adapters` to `dialect` to more closely match their intended purpose. 
+ +**API Changes** +* Renamed all SQL generation methods from `Sql` to `SQL` + * `ToSql` -> `ToSQL` + * `ToInsertSql` -> `ToInsertSQL` + * `ToUpdateSql` -> `ToUpdateSQL` + * `ToDeleteSql` -> `ToDeleteSQL` + * `ToTruncateSql` -> `ToTruncateSQL` +* Abstracted out `dialect_options` from the adapter to make the dialect self-contained. + * This also removed the dataset<->adapter co-dependency, making the dialect self-contained. +* Refactored the `goqu.I` method. + * Added new `goqu.S`, `goqu.T` and `goqu.C` methods to clarify which type of identifier you are using. + * `goqu.I` should only be used when you have a qualified identifier (e.g. `goqu.I("my_schema.my_table.my_col")`) +* Added new `goqu.Dialect` method to make using `goqu` as an SQL builder easier. + +**Internal Changes** +* Pulled expressions into their own package + * Broke up expressions.go into multiple files to make working with and defining them easier. + * Moved the user-facing methods into the main `goqu` package to keep the same API as before. +* Added more examples +* Moved non-user-facing structs and interfaces to internal modules to clean up the API. +* Increased test coverage. + + +## v6.1.0 + +* Handle nil *time.Time Literal [#73](https://github.com/doug-martin/goqu/pull/73) and [#52](https://github.com/doug-martin/goqu/pull/52) - [@RoarkeRandall](https://github.com/RoarkeRandall) and [@quetz](https://github.com/quetz) +* Add ability to change column rename function [#66](https://github.com/doug-martin/goqu/pull/66) - [@blainehansen](https://github.com/blainehansen) + +## v6.0.0 + +* Updated go support to `1.10`, `1.11` and `1.12` +* Changed testify dependency from c2fo/testify back to stretchr/testify. +* Add support for "FOR UPDATE" and "SKIP LOCKED" [#62](https://github.com/doug-martin/goqu/pull/62) - [@btubbs](https://github.com/btubbs) +* Changed to use go modules + +## v5.0.0 + +* Dropped go 1.6 support; supported versions are `1.8`, `1.9` and latest +* Add context support [#64](https://github.com/doug-martin/goqu/pull/64) - [@cmoad](https://github.com/cmoad) + +## v4.2.0 + +* Add support for ON CONFLICT when using a dataset [#55](https://github.com/doug-martin/goqu/pull/55) - [@bobrnor](https://github.com/bobrnor) + +## v4.1.0 + +* Support for defining WITH clauses for Common Table Expressions (CTE) [#39](https://github.com/doug-martin/goqu/pull/39) - [@Oscil8](https://github.com/Oscil8) + +## v4.0 + +* Prepared(true) issues when using IS NULL comparison operation [#33](https://github.com/doug-martin/goqu/pull/33) - [@danielfbm](https://github.com/danielfbm) + +## v3.3 + +* Add `upsert` support via `InsertIgnore` and `InsertConflict` methods - [#28](https://github.com/doug-martin/goqu/pull/28) - [@aheuermann](https://github.com/aheuermann) +* Adding vendor dependencies and updating tests to run in docker containers [#29](https://github.com/doug-martin/goqu/pull/29) - [@aheuermann](https://github.com/aheuermann) + +## v3.2 + +* Add range clauses ([NOT] BETWEEN) support - [#25](https://github.com/doug-martin/goqu/pull/25) - [@denisvm](https://github.com/denisvm) +* Readme fix [#26](https://github.com/doug-martin/goqu/pull/26) - [@tiagopotencia](https://github.com/tiagopotencia) + +## v3.1.3 + +* Bugfix for chained Where() [#20](https://github.com/doug-martin/goqu/pull/20) - [@Emreu](https://github.com/Emreu) + + +## v3.1.2 + +* Fixing ScanStruct issue with embedded pointers in crud_exec [#20](https://github.com/doug-martin/goqu/pull/20) - [@ruzz311](https://github.com/ruzz311) + +## v3.1.1 + +* Fixing race condition with 
struct_map_cache in crud_exec [#18](https://github.com/doug-martin/goqu/pull/18) - [@andymoon](https://github.com/andymoon), [@aheuermann](https://github.com/aheuermann) + +## v3.1.0 + +* Version 3.1 [#14](https://github.com/doug-martin/goqu/pull/14) - [@andymoon](https://github.com/andymoon) + * Fixed an issue with a nil pointer access on inserts and updates. + * Allowed ScanStructs to take a struct with an embedded pointer to a struct. + * Changed to check whether a struct is anonymous when recursing through an embedded struct. + * Updated to use the latest version of github.com/DATA-DOG/go-sqlmock. + +## v3.0.1 + +* Add literal bytes and update to c2fo testify [#15](https://github.com/doug-martin/goqu/pull/15) - [@TechnotronicOz](https://github.com/TechnotronicOz) + +## v3.0.0 + +* Added support for embedded structs when inserting or updating. [#13](https://github.com/doug-martin/goqu/pull/13) - [@andymoon](https://github.com/andymoon) + +## v2.0.3 + +* Fixed issue with transient columns and the auto select of columns. + +## v2.0.2 + +* Changed references to "github.com/doug-martin/goqu" to "gopkg.in/doug-martin/goqu.v2" + +## v2.0.1 + +* Fixed issue where `ScanStruct(s)` used with `SelectDistinct` caused a panic. + +## v2.0.0 + +* When scanning a struct or slice of structs, the struct(s) will be parsed for the column names to select. [#9](https://github.com/doug-martin/goqu/pull/9) - [@technotronicoz](https://github.com/TechnotronicOz) + +## v1.0.0 + +* You can now pass an IdentifierExpression to `As` [#8](https://github.com/doug-martin/goqu/pull/8) - [@croachrose](https://github.com/croachrose) +* Added info about installation through [gopkg.in](http://labix.org/gopkg.in) + +## v0.3.1 + +* Fixed issue setting Logger when starting a new transaction. + +## v0.3.0 + +* Changed sql generation methods to use a common naming convention. `To(Sql|Insert|Update|Delete)` + * Also changed to have common return values `(string, []interface{}, error)` +* Added `Dataset.Prepared`, which allows a user to specify whether or not SQL should be interpolated. [#7](https://github.com/doug-martin/goqu/issues/7) +* Updated Docs + * More examples +* Increased test coverage. + +## v0.2.0 + +* Changed `CrudExec` to not wrap driver errors in a GoquError [#2](https://github.com/doug-martin/goqu/issues/2) +* Added ability to use a dataset in an `Ex` map or `Eq` expression without having to use `In` [#3](https://github.com/doug-martin/goqu/issues/3) + * `db.From("test").Where(goqu.Ex{"a": db.From("test").Select("b")})` +* Updated readme with links to [`DefaultAdapter`](https://godoc.org/github.com/doug-martin/goqu#DefaultAdapter) + +## v0.1.1 + +* Added SQLite3 adapter [#1](https://github.com/doug-martin/goqu/pull/1) - [@mattn](https://github.com/mattn) + +## v0.1.0 + +* Added: + * [`Ex`](https://godoc.org/github.com/doug-martin/goqu#Ex) + * [`ExOr`](https://godoc.org/github.com/doug-martin/goqu#ExOr) + * [`Op`](https://godoc.org/github.com/doug-martin/goqu#Op) +* More tests and examples +* Added CONTRIBUTING.md +* Added LICENSE information +* Removed godoc introduction in favor of just maintaining the README. 
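+ +A minimal, hypothetical sketch of how the `Ex`, `ExOr` and `Op` types added in this release combine (shown with the modern v9 import path rather than the v0.1.0-era one; the exact quoting assumes the default dialect): + +```go +package main + +import ( + "fmt" + + "github.com/doug-martin/goqu/v9" +) + +func main() { + // Entries in an Ex map are ANDed together; ExOr ORs them instead. + // Op expresses comparisons other than equality (gt, gte, lt, like, ...). + sql, _, _ := goqu.From("items").Where(goqu.ExOr{ + "a": goqu.Op{"gt": 10}, + "b": []string{"x", "y"}, + }).ToSQL() + fmt.Println(sql) + // Roughly: SELECT * FROM "items" WHERE (("a" > 10) OR ("b" IN ('x', 'y'))) +} +``` 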
+ +## v0.0.2 + +* Fixed issue with goqu.New not returning a pointer to a Database + +## v0.0.1 + +* Initial release diff --git a/vendor/github.com/doug-martin/goqu/v9/LICENSE b/vendor/github.com/doug-martin/goqu/v9/LICENSE new file mode 100644 index 000000000..a21ac62a3 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Doug Martin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/doug-martin/goqu/v9/Makefile b/vendor/github.com/doug-martin/goqu/v9/Makefile new file mode 100644 index 000000000..38aa8958d --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/Makefile @@ -0,0 +1,6 @@ +#phony dependency task that does nothing +#"make executable" does not run if there is a ./executable directory, unless the task has a dependency +phony: + +lint: + docker run --rm -v ${CURDIR}:/app -w /app golangci/golangci-lint:v1.23.8 golangci-lint run -v diff --git a/vendor/github.com/doug-martin/goqu/v9/README.md b/vendor/github.com/doug-martin/goqu/v9/README.md new file mode 100644 index 000000000..1f470fdb9 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/README.md @@ -0,0 +1,310 @@ +``` + __ _ ___ __ _ _ _ + / _` |/ _ \ / _` | | | | +| (_| | (_) | (_| | |_| | + \__, |\___/ \__, |\__,_| + |___/ |_| +``` +[![GitHub tag](https://img.shields.io/github/tag/doug-martin/goqu.svg?style=flat)](https://github.com/doug-martin/goqu/releases) +[![Test](https://github.com/doug-martin/goqu/workflows/Test/badge.svg?branch=master&event=push)](https://github.com/doug-martin/goqu/actions?query=workflow%3ATest+and+branch%3Amaster+) +[![Go Reference](https://pkg.go.dev/badge/github.com/doug-martin/goqu/v9.svg)](https://pkg.go.dev/github.com/doug-martin/goqu/v9) +[![codecov](https://codecov.io/gh/doug-martin/goqu/branch/master/graph/badge.svg)](https://codecov.io/gh/doug-martin/goqu) +[![Go Report Card](https://goreportcard.com/badge/github.com/doug-martin/goqu/v9)](https://goreportcard.com/report/github.com/doug-martin/goqu/v9) + +`goqu` is an expressive SQL builder and executor + +If you are upgrading from an older version please read the [Migrating Between Versions](./docs/version_migration.md) docs. + + +## Installation + +If using go modules. + +```sh +go get -u github.com/doug-martin/goqu/v9 +``` + +If you are not using go modules... + +**NOTE** You should still be able to use this package if you are using go version `>v1.10` but, you will need to drop the version from the package. 
`import "github.com/doug-martin/goqu/v9` -> `import "github.com/doug-martin/goqu"` + +```sh +go get -u github.com/doug-martin/goqu +``` + +### [Migrating Between Versions](./docs/version_migration.md) + +## Features + +`goqu` comes with many features but here are a few of the more notable ones + +* Query Builder +* Parameter interpolation (e.g `SELECT * FROM "items" WHERE "id" = ?` -> `SELECT * FROM "items" WHERE "id" = 1`) +* Built from the ground up with multiple dialects in mind +* Insert, Multi Insert, Update, and Delete support +* Scanning of rows to struct[s] or primitive value[s] + +While goqu may support the scanning of rows into structs it is not intended to be used as an ORM if you are looking for common ORM features like associations, +or hooks I would recommend looking at some of the great ORM libraries such as: + +* [gorm](https://github.com/jinzhu/gorm) +* [hood](https://github.com/eaigner/hood) + +## Why? + +We tried a few other sql builders but each was a thin wrapper around sql fragments that we found error prone. `goqu` was built with the following goals in mind: + +* Make the generation of SQL easy and enjoyable +* Create an expressive DSL that would find common errors with SQL at compile time. +* Provide a DSL that accounts for the common SQL expressions, NOT every nuance for each database. +* Provide developers the ability to: + * Use SQL when desired + * Easily scan results into primitive values and structs + * Use the native sql.Db methods when desired + +## Docs + +* [Dialect](./docs/dialect.md) - Introduction to different dialects (`mysql`, `postgres`, `sqlite3`, `sqlserver` etc) +* [Expressions](./docs/expressions.md) - Introduction to `goqu` expressions and common examples. +* [Select Dataset](./docs/selecting.md) - Docs and examples about creating and executing SELECT sql statements. +* [Insert Dataset](./docs/inserting.md) - Docs and examples about creating and executing INSERT sql statements. +* [Update Dataset](./docs/updating.md) - Docs and examples about creating and executing UPDATE sql statements. +* [Delete Dataset](./docs/deleting.md) - Docs and examples about creating and executing DELETE sql statements. +* [Prepared Statements](./docs/interpolation.md) - Docs about interpolation and prepared statements in `goqu`. +* [Database](./docs/database.md) - Docs and examples of using a Database to execute queries in `goqu` +* [Working with time.Time](./docs/time.md) - Docs on how to use alternate time locations. + +## Quick Examples + +### Select + +See the [select dataset](./docs/selecting.md) docs for more in depth examples + +```go +sql, _, _ := goqu.From("test").ToSQL() +fmt.Println(sql) +``` + +Output: + +``` +SELECT * FROM "test" +``` + +```go +sql, _, _ := goqu.From("test").Where(goqu.Ex{ + "d": []string{"a", "b", "c"}, +}).ToSQL() +fmt.Println(sql) +``` + +Output: + +``` +SELECT * FROM "test" WHERE ("d" IN ('a', 'b', 'c')) +``` + +### Insert + +See the [insert dataset](./docs/inserting.md) docs for more in depth examples + +```go +ds := goqu.Insert("user"). + Cols("first_name", "last_name"). 
+ Vals( + goqu.Vals{"Greg", "Farley"}, + goqu.Vals{"Jimmy", "Stewart"}, + goqu.Vals{"Jeff", "Jeffers"}, + ) +insertSQL, args, _ := ds.ToSQL() +fmt.Println(insertSQL, args) +``` + +Output: +```sql +INSERT INTO "user" ("first_name", "last_name") VALUES ('Greg', 'Farley'), ('Jimmy', 'Stewart'), ('Jeff', 'Jeffers') [] +``` + +```go +ds := goqu.Insert("user").Rows( + goqu.Record{"first_name": "Greg", "last_name": "Farley"}, + goqu.Record{"first_name": "Jimmy", "last_name": "Stewart"}, + goqu.Record{"first_name": "Jeff", "last_name": "Jeffers"}, +) +insertSQL, args, _ := ds.ToSQL() +fmt.Println(insertSQL, args) +``` + +Output: +``` +INSERT INTO "user" ("first_name", "last_name") VALUES ('Greg', 'Farley'), ('Jimmy', 'Stewart'), ('Jeff', 'Jeffers') [] +``` + + +```go +type User struct { + FirstName string `db:"first_name"` + LastName string `db:"last_name"` +} +ds := goqu.Insert("user").Rows( + User{FirstName: "Greg", LastName: "Farley"}, + User{FirstName: "Jimmy", LastName: "Stewart"}, + User{FirstName: "Jeff", LastName: "Jeffers"}, +) +insertSQL, args, _ := ds.ToSQL() +fmt.Println(insertSQL, args) +``` + +Output: +``` +INSERT INTO "user" ("first_name", "last_name") VALUES ('Greg', 'Farley'), ('Jimmy', 'Stewart'), ('Jeff', 'Jeffers') [] +``` + +```go +ds := goqu.Insert("user").Prepared(true). + FromQuery(goqu.From("other_table")) +insertSQL, args, _ := ds.ToSQL() +fmt.Println(insertSQL, args) +``` + +Output: +``` +INSERT INTO "user" SELECT * FROM "other_table" [] +``` + +```go +ds := goqu.Insert("user").Prepared(true). + Cols("first_name", "last_name"). + FromQuery(goqu.From("other_table").Select("fn", "ln")) +insertSQL, args, _ := ds.ToSQL() +fmt.Println(insertSQL, args) +``` + +Output: +``` +INSERT INTO "user" ("first_name", "last_name") SELECT "fn", "ln" FROM "other_table" [] +``` + +### Update + +See the [update dataset](./docs/updating.md) docs for more in-depth examples + +```go +sql, args, _ := goqu.Update("items").Set( + goqu.Record{"name": "Test", "address": "111 Test Addr"}, +).ToSQL() +fmt.Println(sql, args) +``` + +Output: +``` +UPDATE "items" SET "address"='111 Test Addr',"name"='Test' [] +``` + +```go +type item struct { + Address string `db:"address"` + Name string `db:"name" goqu:"skipupdate"` +} +sql, args, _ := goqu.Update("items").Set( + item{Name: "Test", Address: "111 Test Addr"}, +).ToSQL() +fmt.Println(sql, args) +``` + +Output: +``` +UPDATE "items" SET "address"='111 Test Addr' [] +``` + +```go +sql, _, _ := goqu.Update("test"). + Set(goqu.Record{"foo": "bar"}). + Where(goqu.Ex{ + "a": goqu.Op{"gt": 10}, + }).ToSQL() +fmt.Println(sql) +``` + +Output: +``` +UPDATE "test" SET "foo"='bar' WHERE ("a" > 10) +``` + +### Delete + +See the [delete dataset](./docs/deleting.md) docs for more in-depth examples + +```go +ds := goqu.Delete("items") + +sql, args, _ := ds.ToSQL() +fmt.Println(sql, args) +``` + +```go +sql, _, _ := goqu.Delete("test").Where(goqu.Ex{ + "c": nil, + }).ToSQL() +fmt.Println(sql) +``` + +Output: +``` +DELETE FROM "test" WHERE ("c" IS NULL) +``` + +<a name="contributions"></a> +## Contributions + +I am always welcoming contributions of any type. Please open an issue or create a PR if you find an issue with any of the following. 
+ +* An issue with Documentation +* You found the documentation lacking in some way + +If you have an issue with the package, please include the following: + +* The dialect you are using +* A description of the problem +* A short example of how to reproduce (if applicable) + +Without those basics it can be difficult to reproduce your issue locally. You may be asked for more information, but that is a good starting point. + +### New Features + +New features and/or enhancements are great and I encourage you to either submit a PR or create an issue. In both cases, include the following, as the need/requirement may not be readily apparent. + +1. The use case +2. A short example + +If you are issuing a PR, also include the following: + +1. Tests - otherwise the PR will not be merged +2. Documentation - otherwise the PR will not be merged +3. Examples - [If applicable] see example_test.go for examples + +If you find an issue you want to work on, please comment on it letting other people know you are looking at it, and I will assign the issue to you. + +If you want to work on an issue but don't know where to start, just leave a comment and I'll be more than happy to point you in the right direction. + +### Running tests +The test suite requires postgres, mysql and sqlserver databases. You can override the connection strings with the [`MYSQL_URI`, `PG_URI`, `SQLSERVER_URI` environment variables](https://github.com/doug-martin/goqu/blob/2fe3349/docker-compose.yml#L26) + +```sh +go test -v -race ./... +``` + +You can also run the tests in a container using [docker-compose](https://docs.docker.com/compose/). + +```sh +MYSQL_VERSION=8 POSTGRES_VERSION=13.4 SQLSERVER_VERSION=2017-CU8-ubuntu GO_VERSION=latest docker-compose run goqu +``` + +## License + +`goqu` is released under the [MIT License](http://www.opensource.org/licenses/MIT). 
+ + + + + diff --git a/vendor/github.com/doug-martin/goqu/v9/_config.yml b/vendor/github.com/doug-martin/goqu/v9/_config.yml new file mode 100644 index 000000000..c4192631f --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/_config.yml @@ -0,0 +1 @@ +theme: jekyll-theme-cayman \ No newline at end of file diff --git a/vendor/github.com/doug-martin/goqu/v9/codecov.yml b/vendor/github.com/doug-martin/goqu/v9/codecov.yml new file mode 100644 index 000000000..a3d9cd50b --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/codecov.yml @@ -0,0 +1,3 @@ +ignore: + - "**/mocks/**" # glob accepted + - "mocks/**" # glob accepted \ No newline at end of file diff --git a/vendor/github.com/doug-martin/goqu/v9/database.go b/vendor/github.com/doug-martin/goqu/v9/database.go new file mode 100644 index 000000000..3dce8d5c7 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/database.go @@ -0,0 +1,648 @@ +package goqu + +import ( + "context" + "database/sql" + "sync" + + "github.com/doug-martin/goqu/v9/exec" +) + +type ( + Logger interface { + Printf(format string, v ...interface{}) + } + // Interface for sql.DB, an interface is used so you can use with other + // libraries such as sqlx instead of the native sql.DB + SQLDatabase interface { + Begin() (*sql.Tx, error) + BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) + PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) + QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row + } + // This struct is the wrapper for a Db. The struct delegates most calls to either an Exec instance or to the Db + // passed into the constructor. + Database struct { + logger Logger + dialect string + // nolint: stylecheck // keep for backwards compatibility + Db SQLDatabase + qf exec.QueryFactory + qfOnce sync.Once + } +) + +// This is the common entry point into goqu. +// +// dialect: This is the adapter dialect, you should see your database adapter for the string to use. Built in adapters +// can be found at https://github.com/doug-martin/goqu/tree/master/adapters +// +// db: A sql.Db to use for querying the database +// import ( +// "database/sql" +// "fmt" +// "github.com/doug-martin/goqu/v9" +// _ "github.com/doug-martin/goqu/v9/dialect/postgres" +// _ "github.com/lib/pq" +// ) +// +// func main() { +// sqlDb, err := sql.Open("postgres", "user=postgres dbname=goqupostgres sslmode=disable ") +// if err != nil { +// panic(err.Error()) +// } +// db := goqu.New("postgres", sqlDb) +// } +// The most commonly used Database method is From, which creates a new Dataset that uses the correct adapter and +// supports queries. +// var ids []uint32 +// if err := db.From("items").Where(goqu.I("id").Gt(10)).Pluck("id", &ids); err != nil { +// panic(err.Error()) +// } +// fmt.Printf("%+v", ids) +func newDatabase(dialect string, db SQLDatabase) *Database { + return &Database{ + logger: nil, + dialect: dialect, + Db: db, + qf: nil, + qfOnce: sync.Once{}, + } +} + +// returns this databases dialect +func (d *Database) Dialect() string { + return d.dialect +} + +// Starts a new Transaction. +func (d *Database) Begin() (*TxDatabase, error) { + sqlTx, err := d.Db.Begin() + if err != nil { + return nil, err + } + tx := NewTx(d.dialect, sqlTx) + tx.Logger(d.logger) + return tx, nil +} + +// Starts a new Transaction. 
See sql.DB#BeginTx for a description of the options +func (d *Database) BeginTx(ctx context.Context, opts *sql.TxOptions) (*TxDatabase, error) { + sqlTx, err := d.Db.BeginTx(ctx, opts) + if err != nil { + return nil, err + } + tx := NewTx(d.dialect, sqlTx) + tx.Logger(d.logger) + return tx, nil +} + +// WithTx starts a new transaction and executes fn inside a Wrap call +func (d *Database) WithTx(fn func(*TxDatabase) error) error { + tx, err := d.Begin() + if err != nil { + return err + } + return tx.Wrap(func() error { return fn(tx) }) +} + +// Creates a new Dataset that uses the correct adapter and supports queries. + +// var ids []uint32 +// if err := db.From("items").Where(goqu.I("id").Gt(10)).Pluck("id", &ids); err != nil { +// panic(err.Error()) +// } +// fmt.Printf("%+v", ids) +// +// from...: Sources for your dataset; these can be table names (strings), a goqu.Literal or another goqu.Dataset +func (d *Database) From(from ...interface{}) *SelectDataset { + return newDataset(d.dialect, d.queryFactory()).From(from...) +} + +func (d *Database) Select(cols ...interface{}) *SelectDataset { + return newDataset(d.dialect, d.queryFactory()).Select(cols...) +} + +func (d *Database) Update(table interface{}) *UpdateDataset { + return newUpdateDataset(d.dialect, d.queryFactory()).Table(table) +} + +func (d *Database) Insert(table interface{}) *InsertDataset { + return newInsertDataset(d.dialect, d.queryFactory()).Into(table) +} + +func (d *Database) Delete(table interface{}) *DeleteDataset { + return newDeleteDataset(d.dialect, d.queryFactory()).From(table) +} + +func (d *Database) Truncate(table ...interface{}) *TruncateDataset { + return newTruncateDataset(d.dialect, d.queryFactory()).Table(table...) +} + +// Sets the logger to use when logging queries +func (d *Database) Logger(logger Logger) { + d.logger = logger +} + +// Logs a given operation with the specified sql and arguments +func (d *Database) Trace(op, sqlString string, args ...interface{}) { + if d.logger != nil { + if sqlString != "" { + if len(args) != 0 { + d.logger.Printf("[goqu] %s [query:=`%s` args:=%+v]", op, sqlString, args) + } else { + d.logger.Printf("[goqu] %s [query:=`%s`]", op, sqlString) + } + } else { + d.logger.Printf("[goqu] %s", op) + } + } +} + +// Uses the db to Execute the query with arguments and return the sql.Result +// +// query: The SQL to execute +// +// args...: for any placeholder parameters in the query +func (d *Database) Exec(query string, args ...interface{}) (sql.Result, error) { + return d.ExecContext(context.Background(), query, args...) +} + +// Uses the db to Execute the query with arguments and return the sql.Result +// +// query: The SQL to execute +// +// args...: for any placeholder parameters in the query +func (d *Database) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + d.Trace("EXEC", query, args...) + return d.Db.ExecContext(ctx, query, args...) +} + +// Can be used to prepare a query. +// +// You can use this in tandem with a dataset by doing the following. 
+// sql, args, err := db.From("items").Where(goqu.I("id").Gt(10)).ToSQL(true) +// if err != nil{ +// panic(err.Error()) //you could gracefully handle the error also +// } +// stmt, err := db.Prepare(sql) +// if err != nil{ +// panic(err.Error()) //you could gracefully handle the error also +// } +// defer stmt.Close() +// rows, err := stmt.Query(args) +// if err != nil{ +// panic(err.Error()) //you could gracefully handle the error also +// } +// defer rows.Close() +// for rows.Next(){ +// //scan your rows +// } +// if rows.Err() != nil{ +// panic(err.Error()) //you could gracefully handle the error also +// } +// +// query: The SQL statement to prepare. +func (d *Database) Prepare(query string) (*sql.Stmt, error) { + return d.PrepareContext(context.Background(), query) +} + +// Can be used to prepare a query. +// +// You can use this in tandem with a dataset by doing the following. +// sql, args, err := db.From("items").Where(goqu.I("id").Gt(10)).ToSQL(true) +// if err != nil{ +// panic(err.Error()) //you could gracefully handle the error also +// } +// stmt, err := db.Prepare(sql) +// if err != nil{ +// panic(err.Error()) //you could gracefully handle the error also +// } +// defer stmt.Close() +// rows, err := stmt.QueryContext(ctx, args) +// if err != nil{ +// panic(err.Error()) //you could gracefully handle the error also +// } +// defer rows.Close() +// for rows.Next(){ +// //scan your rows +// } +// if rows.Err() != nil{ +// panic(err.Error()) //you could gracefully handle the error also +// } +// +// query: The SQL statement to prepare. +func (d *Database) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) { + d.Trace("PREPARE", query) + return d.Db.PrepareContext(ctx, query) +} + +// Used to query for multiple rows. +// +// You can use this in tandem with a dataset by doing the following. +// sql, err := db.From("items").Where(goqu.I("id").Gt(10)).ToSQL() +// if err != nil{ +// panic(err.Error()) //you could gracefully handle the error also +// } +// rows, err := stmt.Query(args) +// if err != nil{ +// panic(err.Error()) //you could gracefully handle the error also +// } +// defer rows.Close() +// for rows.Next(){ +// //scan your rows +// } +// if rows.Err() != nil{ +// panic(err.Error()) //you could gracefully handle the error also +// } +// +// query: The SQL to execute +// +// args...: for any placeholder parameters in the query +func (d *Database) Query(query string, args ...interface{}) (*sql.Rows, error) { + return d.QueryContext(context.Background(), query, args...) +} + +// Used to query for multiple rows. +// +// You can use this in tandem with a dataset by doing the following. +// sql, err := db.From("items").Where(goqu.I("id").Gt(10)).ToSQL() +// if err != nil{ +// panic(err.Error()) //you could gracefully handle the error also +// } +// rows, err := stmt.QueryContext(ctx, args) +// if err != nil{ +// panic(err.Error()) //you could gracefully handle the error also +// } +// defer rows.Close() +// for rows.Next(){ +// //scan your rows +// } +// if rows.Err() != nil{ +// panic(err.Error()) //you could gracefully handle the error also +// } +// +// query: The SQL to execute +// +// args...: for any placeholder parameters in the query +func (d *Database) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) { + d.Trace("QUERY", query, args...) + return d.Db.QueryContext(ctx, query, args...) +} + +// Used to query for a single row. +// +// You can use this in tandem with a dataset by doing the following. 
+// sql, err := db.From("items").Where(goqu.I("id").Gt(10)).Limit(1).ToSQL() +// if err != nil{ +// panic(err.Error()) //you could gracefully handle the error also +// } +// rows, err := stmt.QueryRow(args) +// if err != nil{ +// panic(err.Error()) //you could gracefully handle the error also +// } +// //scan your row +// +// query: The SQL to execute +// +// args...: for any placeholder parameters in the query +func (d *Database) QueryRow(query string, args ...interface{}) *sql.Row { + return d.QueryRowContext(context.Background(), query, args...) +} + +// Used to query for a single row. +// +// You can use this in tandem with a dataset by doing the following. +// sql, err := db.From("items").Where(goqu.I("id").Gt(10)).Limit(1).ToSQL() +// if err != nil{ +// panic(err.Error()) //you could gracefully handle the error also +// } +// rows, err := stmt.QueryRowContext(ctx, args) +// if err != nil{ +// panic(err.Error()) //you could gracefully handle the error also +// } +// //scan your row +// +// query: The SQL to execute +// +// args...: for any placeholder parameters in the query +func (d *Database) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row { + d.Trace("QUERY ROW", query, args...) + return d.Db.QueryRowContext(ctx, query, args...) +} + +func (d *Database) queryFactory() exec.QueryFactory { + d.qfOnce.Do(func() { + d.qf = exec.NewQueryFactory(d) + }) + return d.qf +} + +// Queries the database using the supplied query, and args and uses CrudExec.ScanStructs to scan the results into a +// slice of structs +// +// i: A pointer to a slice of structs +// +// query: The SQL to execute +// +// args...: for any placeholder parameters in the query +func (d *Database) ScanStructs(i interface{}, query string, args ...interface{}) error { + return d.ScanStructsContext(context.Background(), i, query, args...) +} + +// Queries the database using the supplied context, query, and args and uses CrudExec.ScanStructsContext to scan the +// results into a slice of structs +// +// i: A pointer to a slice of structs +// +// query: The SQL to execute +// +// args...: for any placeholder parameters in the query +func (d *Database) ScanStructsContext(ctx context.Context, i interface{}, query string, args ...interface{}) error { + return d.queryFactory().FromSQL(query, args...).ScanStructsContext(ctx, i) +} + +// Queries the database using the supplied query, and args and uses CrudExec.ScanStruct to scan the results into a +// struct +// +// i: A pointer to a struct +// +// query: The SQL to execute +// +// args...: for any placeholder parameters in the query +func (d *Database) ScanStruct(i interface{}, query string, args ...interface{}) (bool, error) { + return d.ScanStructContext(context.Background(), i, query, args...) 
+} + +// Queries the database using the supplied context, query, and args and uses CrudExec.ScanStructContext to scan the +// results into a struct +// +// i: A pointer to a struct +// +// query: The SQL to execute +// +// args...: for any placeholder parameters in the query +func (d *Database) ScanStructContext(ctx context.Context, i interface{}, query string, args ...interface{}) (bool, error) { + return d.queryFactory().FromSQL(query, args...).ScanStructContext(ctx, i) +} + +// Queries the database using the supplied query, and args and uses CrudExec.ScanVals to scan the results into a slice +// of primitive values +// +// i: A pointer to a slice of primitive values +// +// query: The SQL to execute +// +// args...: for any placeholder parameters in the query +func (d *Database) ScanVals(i interface{}, query string, args ...interface{}) error { + return d.ScanValsContext(context.Background(), i, query, args...) +} + +// Queries the database using the supplied context, query, and args and uses CrudExec.ScanValsContext to scan the +// results into a slice of primitive values +// +// i: A pointer to a slice of primitive values +// +// query: The SQL to execute +// +// args...: for any placeholder parameters in the query +func (d *Database) ScanValsContext(ctx context.Context, i interface{}, query string, args ...interface{}) error { + return d.queryFactory().FromSQL(query, args...).ScanValsContext(ctx, i) +} + +// Queries the database using the supplied query, and args and uses CrudExec.ScanVal to scan the results into a +// primitive value +// +// i: A pointer to a primitive value +// +// query: The SQL to execute +// +// args...: for any placeholder parameters in the query +func (d *Database) ScanVal(i interface{}, query string, args ...interface{}) (bool, error) { + return d.ScanValContext(context.Background(), i, query, args...) +} + +// Queries the database using the supplied context, query, and args and uses CrudExec.ScanValContext to scan the +// results into a primitive value +// +// i: A pointer to a primitive value +// +// query: The SQL to execute +// +// args...: for any placeholder parameters in the query +func (d *Database) ScanValContext(ctx context.Context, i interface{}, query string, args ...interface{}) (bool, error) { + return d.queryFactory().FromSQL(query, args...).ScanValContext(ctx, i) +} + +// A wrapper around a sql.Tx and works the same way as Database +type ( + // Interface for sql.Tx, an interface is used so you can use with other + // libraries such as sqlx instead of the native sql.DB + SQLTx interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) + PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) + QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row + Commit() error + Rollback() error + } + TxDatabase struct { + logger Logger + dialect string + Tx SQLTx + qf exec.QueryFactory + qfOnce sync.Once + } +) + +// Creates a new TxDatabase +func NewTx(dialect string, tx SQLTx) *TxDatabase { + return &TxDatabase{dialect: dialect, Tx: tx} +} + +// returns this databases dialect +func (td *TxDatabase) Dialect() string { + return td.dialect +} + +// Creates a new Dataset for querying a Database. +func (td *TxDatabase) From(cols ...interface{}) *SelectDataset { + return newDataset(td.dialect, td.queryFactory()).From(cols...) 
+} + +func (td *TxDatabase) Select(cols ...interface{}) *SelectDataset { + return newDataset(td.dialect, td.queryFactory()).Select(cols...) +} + +func (td *TxDatabase) Update(table interface{}) *UpdateDataset { + return newUpdateDataset(td.dialect, td.queryFactory()).Table(table) +} + +func (td *TxDatabase) Insert(table interface{}) *InsertDataset { + return newInsertDataset(td.dialect, td.queryFactory()).Into(table) +} + +func (td *TxDatabase) Delete(table interface{}) *DeleteDataset { + return newDeleteDataset(td.dialect, td.queryFactory()).From(table) +} + +func (td *TxDatabase) Truncate(table ...interface{}) *TruncateDataset { + return newTruncateDataset(td.dialect, td.queryFactory()).Table(table...) +} + +// Sets the logger +func (td *TxDatabase) Logger(logger Logger) { + td.logger = logger +} + +func (td *TxDatabase) Trace(op, sqlString string, args ...interface{}) { + if td.logger != nil { + if sqlString != "" { + if len(args) != 0 { + td.logger.Printf("[goqu - transaction] %s [query:=`%s` args:=%+v] ", op, sqlString, args) + } else { + td.logger.Printf("[goqu - transaction] %s [query:=`%s`] ", op, sqlString) + } + } else { + td.logger.Printf("[goqu - transaction] %s", op) + } + } +} + +// See Database#Exec +func (td *TxDatabase) Exec(query string, args ...interface{}) (sql.Result, error) { + return td.ExecContext(context.Background(), query, args...) +} + +// See Database#ExecContext +func (td *TxDatabase) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + td.Trace("EXEC", query, args...) + return td.Tx.ExecContext(ctx, query, args...) +} + +// See Database#Prepare +func (td *TxDatabase) Prepare(query string) (*sql.Stmt, error) { + return td.PrepareContext(context.Background(), query) +} + +// See Database#PrepareContext +func (td *TxDatabase) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) { + td.Trace("PREPARE", query) + return td.Tx.PrepareContext(ctx, query) +} + +// See Database#Query +func (td *TxDatabase) Query(query string, args ...interface{}) (*sql.Rows, error) { + return td.QueryContext(context.Background(), query, args...) +} + +// See Database#QueryContext +func (td *TxDatabase) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) { + td.Trace("QUERY", query, args...) + return td.Tx.QueryContext(ctx, query, args...) +} + +// See Database#QueryRow +func (td *TxDatabase) QueryRow(query string, args ...interface{}) *sql.Row { + return td.QueryRowContext(context.Background(), query, args...) +} + +// See Database#QueryRowContext +func (td *TxDatabase) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row { + td.Trace("QUERY ROW", query, args...) + return td.Tx.QueryRowContext(ctx, query, args...) +} + +func (td *TxDatabase) queryFactory() exec.QueryFactory { + td.qfOnce.Do(func() { + td.qf = exec.NewQueryFactory(td) + }) + return td.qf +} + +// See Database#ScanStructs +func (td *TxDatabase) ScanStructs(i interface{}, query string, args ...interface{}) error { + return td.ScanStructsContext(context.Background(), i, query, args...) 
+} + +// See Database#ScanStructsContext +func (td *TxDatabase) ScanStructsContext(ctx context.Context, i interface{}, query string, args ...interface{}) error { + return td.queryFactory().FromSQL(query, args...).ScanStructsContext(ctx, i) +} + +// See Database#ScanStruct +func (td *TxDatabase) ScanStruct(i interface{}, query string, args ...interface{}) (bool, error) { + return td.ScanStructContext(context.Background(), i, query, args...) +} + +// See Database#ScanStructContext +func (td *TxDatabase) ScanStructContext(ctx context.Context, i interface{}, query string, args ...interface{}) (bool, error) { + return td.queryFactory().FromSQL(query, args...).ScanStructContext(ctx, i) +} + +// See Database#ScanVals +func (td *TxDatabase) ScanVals(i interface{}, query string, args ...interface{}) error { + return td.ScanValsContext(context.Background(), i, query, args...) +} + +// See Database#ScanValsContext +func (td *TxDatabase) ScanValsContext(ctx context.Context, i interface{}, query string, args ...interface{}) error { + return td.queryFactory().FromSQL(query, args...).ScanValsContext(ctx, i) +} + +// See Database#ScanVal +func (td *TxDatabase) ScanVal(i interface{}, query string, args ...interface{}) (bool, error) { + return td.ScanValContext(context.Background(), i, query, args...) +} + +// See Database#ScanValContext +func (td *TxDatabase) ScanValContext(ctx context.Context, i interface{}, query string, args ...interface{}) (bool, error) { + return td.queryFactory().FromSQL(query, args...).ScanValContext(ctx, i) +} + +// COMMIT the transaction +func (td *TxDatabase) Commit() error { + td.Trace("COMMIT", "") + return td.Tx.Commit() +} + +// ROLLBACK the transaction +func (td *TxDatabase) Rollback() error { + td.Trace("ROLLBACK", "") + return td.Tx.Rollback() +} + +// A helper method that will automatically COMMIT or ROLLBACK once the supplied function is done executing +// +// tx, err := db.Begin() +// if err != nil{ +// panic(err.Error()) // you could gracefully handle the error also +// } +// if err := tx.Wrap(func() error{ +// if _, err := tx.From("test").Insert(Record{"a":1, "b": "b"}).Exec(){ +// // this error will be the return error from the Wrap call +// return err +// } +// return nil +// }); err != nil{ +// panic(err.Error()) // you could gracefully handle the error also +// } +func (td *TxDatabase) Wrap(fn func() error) (err error) { + defer func() { + if p := recover(); p != nil { + _ = td.Rollback() + panic(p) + } + if err != nil { + if rollbackErr := td.Rollback(); rollbackErr != nil { + err = rollbackErr + } + } else { + if commitErr := td.Commit(); commitErr != nil { + err = commitErr + } + } + }() + return fn() +} diff --git a/vendor/github.com/doug-martin/goqu/v9/delete_dataset.go b/vendor/github.com/doug-martin/goqu/v9/delete_dataset.go new file mode 100644 index 000000000..320925a97 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/delete_dataset.go @@ -0,0 +1,244 @@ +package goqu + +import ( + "github.com/doug-martin/goqu/v9/exec" + "github.com/doug-martin/goqu/v9/exp" + "github.com/doug-martin/goqu/v9/internal/errors" + "github.com/doug-martin/goqu/v9/internal/sb" +) + +var ErrBadFromArgument = errors.New("unsupported DeleteDataset#From argument, a string or identifier expression is required") + +type DeleteDataset struct { + dialect SQLDialect + clauses exp.DeleteClauses + isPrepared prepared + queryFactory exec.QueryFactory + err error +} + +// used internally by database to create a database with a specific adapter +func newDeleteDataset(d string, 
queryFactory exec.QueryFactory) *DeleteDataset { + return &DeleteDataset{ + clauses: exp.NewDeleteClauses(), + dialect: GetDialect(d), + queryFactory: queryFactory, + isPrepared: preparedNoPreference, + err: nil, + } +} + +func Delete(table interface{}) *DeleteDataset { + return newDeleteDataset("default", nil).From(table) +} + +func (dd *DeleteDataset) Expression() exp.Expression { + return dd +} + +// Clones the dataset +func (dd *DeleteDataset) Clone() exp.Expression { + return dd.copy(dd.clauses) +} + +// Set the parameter interpolation behavior. See examples +// +// prepared: If true the dataset WILL NOT interpolate the parameters. +func (dd *DeleteDataset) Prepared(prepared bool) *DeleteDataset { + ret := dd.copy(dd.clauses) + ret.isPrepared = preparedFromBool(prepared) + return ret +} + +// Returns true if Prepared(true) has been called on this dataset +func (dd *DeleteDataset) IsPrepared() bool { + return dd.isPrepared.Bool() +} + +// Sets the adapter used to serialize values and create the SQL statement +func (dd *DeleteDataset) WithDialect(dl string) *DeleteDataset { + ds := dd.copy(dd.GetClauses()) + ds.dialect = GetDialect(dl) + return ds +} + +// Returns the current SQLDialect on the dataset +func (dd *DeleteDataset) Dialect() SQLDialect { + return dd.dialect +} + +// Set the dialect for this dataset. +func (dd *DeleteDataset) SetDialect(dialect SQLDialect) *DeleteDataset { + cd := dd.copy(dd.GetClauses()) + cd.dialect = dialect + return cd +} + +// Returns the current clauses on the dataset. +func (dd *DeleteDataset) GetClauses() exp.DeleteClauses { + return dd.clauses +} + +// used interally to copy the dataset +func (dd *DeleteDataset) copy(clauses exp.DeleteClauses) *DeleteDataset { + return &DeleteDataset{ + dialect: dd.dialect, + clauses: clauses, + isPrepared: dd.isPrepared, + queryFactory: dd.queryFactory, + err: dd.err, + } +} + +// Creates a WITH clause for a common table expression (CTE). +// +// The name will be available to SELECT from in the associated query; and can optionally +// contain a list of column names "name(col1, col2, col3)". +// +// The name will refer to the results of the specified subquery. +func (dd *DeleteDataset) With(name string, subquery exp.Expression) *DeleteDataset { + return dd.copy(dd.clauses.CommonTablesAppend(exp.NewCommonTableExpression(false, name, subquery))) +} + +// Creates a WITH RECURSIVE clause for a common table expression (CTE) +// +// The name will be available to SELECT from in the associated query; and must +// contain a list of column names "name(col1, col2, col3)" for a recursive clause. +// +// The name will refer to the results of the specified subquery. The subquery for +// a recursive query will always end with a UNION or UNION ALL with a clause that +// refers to the CTE by name. +func (dd *DeleteDataset) WithRecursive(name string, subquery exp.Expression) *DeleteDataset { + return dd.copy(dd.clauses.CommonTablesAppend(exp.NewCommonTableExpression(true, name, subquery))) +} + +// Adds a FROM clause. This return a new dataset with the original sources replaced. See examples. +// You can pass in the following. +// string: Will automatically be turned into an identifier +// Dataset: Will be added as a sub select. 
If the Dataset is not aliased it will automatically be aliased +// LiteralExpression: (See Literal) Will use the literal SQL +func (dd *DeleteDataset) From(table interface{}) *DeleteDataset { + switch t := table.(type) { + case exp.IdentifierExpression: + return dd.copy(dd.clauses.SetFrom(t)) + case string: + return dd.copy(dd.clauses.SetFrom(exp.ParseIdentifier(t))) + default: + panic(ErrBadFromArgument) + } +} + +// Adds a WHERE clause. See examples. +func (dd *DeleteDataset) Where(expressions ...exp.Expression) *DeleteDataset { + return dd.copy(dd.clauses.WhereAppend(expressions...)) +} + +// Removes the WHERE clause. See examples. +func (dd *DeleteDataset) ClearWhere() *DeleteDataset { + return dd.copy(dd.clauses.ClearWhere()) +} + +// Adds an ORDER BY clause. If an ORDER BY is currently set, it replaces it. See examples. +func (dd *DeleteDataset) Order(order ...exp.OrderedExpression) *DeleteDataset { + return dd.copy(dd.clauses.SetOrder(order...)) +} + +// Adds more columns to the current ORDER BY clause. If no order has been previously specified, it is the same as +// calling Order. See examples. +func (dd *DeleteDataset) OrderAppend(order ...exp.OrderedExpression) *DeleteDataset { + return dd.copy(dd.clauses.OrderAppend(order...)) +} + +// Adds more columns to the beginning of the current ORDER BY clause. If no order has been previously specified, it is the same as +// calling Order. See examples. +func (dd *DeleteDataset) OrderPrepend(order ...exp.OrderedExpression) *DeleteDataset { + return dd.copy(dd.clauses.OrderPrepend(order...)) +} + +// Removes the ORDER BY clause. See examples. +func (dd *DeleteDataset) ClearOrder() *DeleteDataset { + return dd.copy(dd.clauses.ClearOrder()) +} + +// Adds a LIMIT clause. If the LIMIT is currently set, it replaces it. See examples. +func (dd *DeleteDataset) Limit(limit uint) *DeleteDataset { + if limit > 0 { + return dd.copy(dd.clauses.SetLimit(limit)) + } + return dd.copy(dd.clauses.ClearLimit()) +} + +// Adds a LIMIT ALL clause. If the LIMIT is currently set, it replaces it. See examples. +func (dd *DeleteDataset) LimitAll() *DeleteDataset { + return dd.copy(dd.clauses.SetLimit(L("ALL"))) +} + +// Removes the LIMIT clause. +func (dd *DeleteDataset) ClearLimit() *DeleteDataset { + return dd.copy(dd.clauses.ClearLimit()) +} + +// Adds a RETURNING clause to the dataset if the adapter supports it. +func (dd *DeleteDataset) Returning(returning ...interface{}) *DeleteDataset { + return dd.copy(dd.clauses.SetReturning(exp.NewColumnListExpression(returning...))) +} + +// Get any error that has been set, or nil if no error has been set. +func (dd *DeleteDataset) Error() error { + return dd.err +} + +// Set an error on the dataset if one has not already been set. This error will be returned by a future call to Error +// or as part of ToSQL. This can be used by end users to record errors while building up queries without having to +// track those separately. +func (dd *DeleteDataset) SetError(err error) *DeleteDataset { + if dd.err == nil { + dd.err = err + } + + return dd +} + +// Generates a DELETE SQL statement; if Prepared has been called with true, the parameters will not be interpolated. +// See examples. 
+// +// Errors: +// * There is an error generating the SQL +func (dd *DeleteDataset) ToSQL() (sql string, params []interface{}, err error) { + return dd.deleteSQLBuilder().ToSQL() +} + +// Appends this Dataset's DELETE statement to the SQLBuilder +// This is used internally when using deletes in CTEs +func (dd *DeleteDataset) AppendSQL(b sb.SQLBuilder) { + if dd.err != nil { + b.SetError(dd.err) + return + } + dd.dialect.ToDeleteSQL(b, dd.GetClauses()) +} + +func (dd *DeleteDataset) GetAs() exp.IdentifierExpression { + return nil +} + +func (dd *DeleteDataset) ReturnsColumns() bool { + return dd.clauses.HasReturning() +} + +// Creates an QueryExecutor to execute the query. +// db.Delete("test").Exec() +// +// See Dataset#ToUpdateSQL for arguments +func (dd *DeleteDataset) Executor() exec.QueryExecutor { + return dd.queryFactory.FromSQLBuilder(dd.deleteSQLBuilder()) +} + +func (dd *DeleteDataset) deleteSQLBuilder() sb.SQLBuilder { + buf := sb.NewSQLBuilder(dd.isPrepared.Bool()) + if dd.err != nil { + return buf.SetError(dd.err) + } + dd.dialect.ToDeleteSQL(buf, dd.clauses) + return buf +} diff --git a/vendor/github.com/doug-martin/goqu/v9/dialect/sqlite3/sqlite3.go b/vendor/github.com/doug-martin/goqu/v9/dialect/sqlite3/sqlite3.go new file mode 100644 index 000000000..40ddb2a5d --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/dialect/sqlite3/sqlite3.go @@ -0,0 +1,76 @@ +package sqlite3 + +import ( + "time" + + "github.com/doug-martin/goqu/v9" + "github.com/doug-martin/goqu/v9/exp" +) + +func DialectOptions() *goqu.SQLDialectOptions { + opts := goqu.DefaultDialectOptions() + + opts.SupportsReturn = false + opts.SupportsOrderByOnUpdate = true + opts.SupportsLimitOnUpdate = true + opts.SupportsOrderByOnDelete = true + opts.SupportsLimitOnDelete = true + opts.SupportsConflictUpdateWhere = false + opts.SupportsInsertIgnoreSyntax = true + opts.SupportsConflictTarget = true + opts.SupportsMultipleUpdateTables = false + opts.WrapCompoundsInParens = false + opts.SupportsDistinctOn = false + opts.SupportsWindowFunction = false + opts.SupportsLateral = false + + opts.PlaceHolderFragment = []byte("?") + opts.IncludePlaceholderNum = false + opts.QuoteRune = '`' + opts.DefaultValuesFragment = []byte("") + opts.True = []byte("1") + opts.False = []byte("0") + opts.TimeFormat = time.RFC3339Nano + opts.BooleanOperatorLookup = map[exp.BooleanOperation][]byte{ + exp.EqOp: []byte("="), + exp.NeqOp: []byte("!="), + exp.GtOp: []byte(">"), + exp.GteOp: []byte(">="), + exp.LtOp: []byte("<"), + exp.LteOp: []byte("<="), + exp.InOp: []byte("IN"), + exp.NotInOp: []byte("NOT IN"), + exp.IsOp: []byte("IS"), + exp.IsNotOp: []byte("IS NOT"), + exp.LikeOp: []byte("LIKE"), + exp.NotLikeOp: []byte("NOT LIKE"), + exp.ILikeOp: []byte("LIKE"), + exp.NotILikeOp: []byte("NOT LIKE"), + exp.RegexpLikeOp: []byte("REGEXP"), + exp.RegexpNotLikeOp: []byte("NOT REGEXP"), + exp.RegexpILikeOp: []byte("REGEXP"), + exp.RegexpNotILikeOp: []byte("NOT REGEXP"), + } + opts.UseLiteralIsBools = false + opts.BitwiseOperatorLookup = map[exp.BitwiseOperation][]byte{ + exp.BitwiseOrOp: []byte("|"), + exp.BitwiseAndOp: []byte("&"), + exp.BitwiseLeftShiftOp: []byte("<<"), + exp.BitwiseRightShiftOp: []byte(">>"), + } + opts.EscapedRunes = map[rune][]byte{ + '\'': []byte("''"), + } + opts.InsertIgnoreClause = []byte("INSERT OR IGNORE INTO ") + opts.ConflictFragment = []byte(" ON CONFLICT ") + opts.ConflictDoUpdateFragment = []byte(" DO UPDATE SET ") + opts.ConflictDoNothingFragment = []byte(" DO NOTHING ") + opts.ForUpdateFragment = []byte("") + 
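// sqlite3 has no SELECT ... FOR UPDATE support, so the FOR UPDATE, OF and NOWAIT locking fragments are intentionally left empty. +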
opts.OfFragment = []byte("") + opts.NowaitFragment = []byte("") + return opts +} + +func init() { + goqu.RegisterDialect("sqlite3", DialectOptions()) +} diff --git a/vendor/github.com/doug-martin/goqu/v9/docker-compose.yml b/vendor/github.com/doug-martin/goqu/v9/docker-compose.yml new file mode 100644 index 000000000..3dd0c57dc --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/docker-compose.yml @@ -0,0 +1,62 @@ +version: "2" + +services: + postgres: + image: "postgres:${POSTGRES_VERSION}" + environment: + - "POSTGRES_USER=postgres" + - "POSTGRES_DB=goqupostgres" + - "POSTGRES_HOST_AUTH_METHOD=trust" + expose: + - "5432" + ports: + - "5432:5432" + + mysql: + image: "mysql:${MYSQL_VERSION}" + environment: + - "MYSQL_DATABASE=goqumysql" + - "MYSQL_ALLOW_EMPTY_PASSWORD=yes" + expose: + - "3306" + ports: + - "3306:3306" + + sqlserver: + image: "mcr.microsoft.com/mssql/server:${SQLSERVER_VERSION}" + environment: + - "ACCEPT_EULA=Y" + - "SA_PASSWORD=qwe123QWE" + expose: + - "1433" + ports: + - "1433:1433" + + goqu: + image: "golang:${GO_VERSION}" + command: ["./wait-for-it.sh", "postgres:5432", "--", "./wait-for-it.sh", "mysql:3306", "--", "go test -v -race ./..."] + working_dir: /go/src/github.com/doug-martin/goqu + volumes: + - "./:/go/src/github.com/doug-martin/goqu" + environment: + MYSQL_URI: 'root@tcp(mysql:3306)/goqumysql?parseTime=true' + PG_URI: 'postgres://postgres:@postgres:5432/goqupostgres?sslmode=disable' + SQLSERVER_URI: 'sqlserver://sa:qwe123QWE@sqlserver:1433?database=master&connection+timeout=30' + depends_on: + - postgres + - mysql + - sqlserver + goqu-coverage: + image: "golang:${GO_VERSION}" + command: ["./wait-for-it.sh", "postgres:5432", "--", "./wait-for-it.sh", "mysql:3306", "--", "./go.test.sh"] + working_dir: /go/src/github.com/doug-martin/goqu + volumes: + - "./:/go/src/github.com/doug-martin/goqu" + environment: + MYSQL_URI: 'root@tcp(mysql:3306)/goqumysql?parseTime=true' + PG_URI: 'postgres://postgres:@postgres:5432/goqupostgres?sslmode=disable' + SQLSERVER_URI: 'sqlserver://sa:qwe123QWE@sqlserver:1433?database=master&connection+timeout=30' + depends_on: + - postgres + - mysql + - sqlserver diff --git a/vendor/github.com/doug-martin/goqu/v9/exec/query_executor.go b/vendor/github.com/doug-martin/goqu/v9/exec/query_executor.go new file mode 100644 index 000000000..ce460dff2 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exec/query_executor.go @@ -0,0 +1,247 @@ +package exec + +import ( + "context" + gsql "database/sql" + "reflect" + + "github.com/doug-martin/goqu/v9/internal/errors" + "github.com/doug-martin/goqu/v9/internal/util" +) + +type ( + QueryExecutor struct { + de DbExecutor + err error + query string + args []interface{} + } +) + +var ( + errUnsupportedScanStructType = errors.New("type must be a pointer to a struct when scanning into a struct") + errUnsupportedScanStructsType = errors.New("type must be a pointer to a slice when scanning into structs") + errUnsupportedScanValsType = errors.New("type must be a pointer to a slice when scanning into vals") + errScanValPointer = errors.New("type must be a pointer when scanning into val") + errScanValNonSlice = errors.New("type cannot be a pointer to a slice when scanning into val") +) + +func newQueryExecutor(de DbExecutor, err error, query string, args ...interface{}) QueryExecutor { + return QueryExecutor{de: de, err: err, query: query, args: args} +} + +func (q QueryExecutor) ToSQL() (sql string, args []interface{}, err error) { + return q.query, q.args, q.err +} + +func (q QueryExecutor) 
Exec() (gsql.Result, error) { + return q.ExecContext(context.Background()) +} + +func (q QueryExecutor) ExecContext(ctx context.Context) (gsql.Result, error) { + if q.err != nil { + return nil, q.err + } + return q.de.ExecContext(ctx, q.query, q.args...) +} + +func (q QueryExecutor) Query() (*gsql.Rows, error) { + return q.QueryContext(context.Background()) +} + +func (q QueryExecutor) QueryContext(ctx context.Context) (*gsql.Rows, error) { + if q.err != nil { + return nil, q.err + } + return q.de.QueryContext(ctx, q.query, q.args...) +} + +// This will execute the SQL and append results to the slice +// var myStructs []MyStruct +// if err := db.From("test").ScanStructs(&myStructs); err != nil{ +// panic(err.Error() +// } +// //use your structs +// +// +// i: A pointer to a slice of structs. +func (q QueryExecutor) ScanStructs(i interface{}) error { + return q.ScanStructsContext(context.Background(), i) +} + +// This will execute the SQL and append results to the slice +// var myStructs []MyStruct +// if err := db.From("test").ScanStructsContext(ctx, &myStructs); err != nil{ +// panic(err.Error() +// } +// //use your structs +// +// +// i: A pointer to a slice of structs. +func (q QueryExecutor) ScanStructsContext(ctx context.Context, i interface{}) error { + scanner, err := q.ScannerContext(ctx) + if err != nil { + return err + } + defer func() { _ = scanner.Close() }() + return scanner.ScanStructs(i) +} + +// This will execute the SQL and fill out the struct with the fields returned. +// This method returns a boolean value that is false if no record was found +// var myStruct MyStruct +// found, err := db.From("test").Limit(1).ScanStruct(&myStruct) +// if err != nil{ +// panic(err.Error() +// } +// if !found{ +// fmt.Println("NOT FOUND") +// } +// +// i: A pointer to a struct +func (q QueryExecutor) ScanStruct(i interface{}) (bool, error) { + return q.ScanStructContext(context.Background(), i) +} + +// This will execute the SQL and fill out the struct with the fields returned. +// This method returns a boolean value that is false if no record was found +// var myStruct MyStruct +// found, err := db.From("test").Limit(1).ScanStructContext(ctx, &myStruct) +// if err != nil{ +// panic(err.Error() +// } +// if !found{ +// fmt.Println("NOT FOUND") +// } +// +// i: A pointer to a struct +func (q QueryExecutor) ScanStructContext(ctx context.Context, i interface{}) (bool, error) { + val := reflect.ValueOf(i) + if !util.IsPointer(val.Kind()) { + return false, errUnsupportedScanStructType + } + val = reflect.Indirect(val) + if !util.IsStruct(val.Kind()) { + return false, errUnsupportedScanStructType + } + + scanner, err := q.ScannerContext(ctx) + if err != nil { + return false, err + } + + defer func() { _ = scanner.Close() }() + + if scanner.Next() { + err = scanner.ScanStruct(i) + if err != nil { + return false, err + } + + return true, scanner.Err() + } + + return false, scanner.Err() +} + +// This will execute the SQL and append results to the slice. +// var ids []uint32 +// if err := db.From("test").Select("id").ScanVals(&ids); err != nil{ +// panic(err.Error() +// } +// +// i: Takes a pointer to a slice of primitive values. +func (q QueryExecutor) ScanVals(i interface{}) error { + return q.ScanValsContext(context.Background(), i) +} + +// This will execute the SQL and append results to the slice. +// var ids []uint32 +// if err := db.From("test").Select("id").ScanValsContext(ctx, &ids); err != nil{ +// panic(err.Error() +// } +// +// i: Takes a pointer to a slice of primitive values. 
+func (q QueryExecutor) ScanValsContext(ctx context.Context, i interface{}) error { + scanner, err := q.ScannerContext(ctx) + if err != nil { + return err + } + defer func() { _ = scanner.Close() }() + return scanner.ScanVals(i) +} + +// This will execute the SQL and set the value of the primitive. This method will return false if no record is found. +// var id uint32 +// found, err := db.From("test").Select("id").Limit(1).ScanVal(&id) +// if err != nil{ +// panic(err.Error() +// } +// if !found{ +// fmt.Println("NOT FOUND") +// } +// +// i: Takes a pointer to a primitive value. +func (q QueryExecutor) ScanVal(i interface{}) (bool, error) { + return q.ScanValContext(context.Background(), i) +} + +// This will execute the SQL and set the value of the primitive. This method will return false if no record is found. +// var id uint32 +// found, err := db.From("test").Select("id").Limit(1).ScanValContext(ctx, &id) +// if err != nil{ +// panic(err.Error() +// } +// if !found{ +// fmt.Println("NOT FOUND") +// } +// +// i: Takes a pointer to a primitive value. +func (q QueryExecutor) ScanValContext(ctx context.Context, i interface{}) (bool, error) { + val := reflect.ValueOf(i) + if !util.IsPointer(val.Kind()) { + return false, errScanValPointer + } + val = reflect.Indirect(val) + if util.IsSlice(val.Kind()) { + switch i.(type) { + case *gsql.RawBytes: // do nothing + case *[]byte: // do nothing + case gsql.Scanner: // do nothing + default: + return false, errScanValNonSlice + } + } + + scanner, err := q.ScannerContext(ctx) + if err != nil { + return false, err + } + + defer func() { _ = scanner.Close() }() + + if scanner.Next() { + err = scanner.ScanVal(i) + if err != nil { + return false, err + } + + return true, scanner.Err() + } + + return false, scanner.Err() +} + +// Scanner will return a Scanner that can be used for manually scanning rows. +func (q QueryExecutor) Scanner() (Scanner, error) { + return q.ScannerContext(context.Background()) +} + +// ScannerContext will return a Scanner that can be used for manually scanning rows. +func (q QueryExecutor) ScannerContext(ctx context.Context) (Scanner, error) { + rows, err := q.QueryContext(ctx) + if err != nil { + return nil, err + } + return NewScanner(rows), nil +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exec/query_factory.go b/vendor/github.com/doug-martin/goqu/v9/exec/query_factory.go new file mode 100644 index 000000000..6ae517d6a --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exec/query_factory.go @@ -0,0 +1,36 @@ +package exec + +import ( + "context" + "database/sql" + + "github.com/doug-martin/goqu/v9/internal/sb" +) + +type ( + // nolint:stylecheck // keep name for backwards compatibility + DbExecutor interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) + } + QueryFactory interface { + FromSQL(sql string, args ...interface{}) QueryExecutor + FromSQLBuilder(b sb.SQLBuilder) QueryExecutor + } + querySupport struct { + de DbExecutor + } +) + +func NewQueryFactory(de DbExecutor) QueryFactory { + return &querySupport{de} +} + +func (qs *querySupport) FromSQL(query string, args ...interface{}) QueryExecutor { + return newQueryExecutor(qs.de, nil, query, args...) +} + +func (qs *querySupport) FromSQLBuilder(b sb.SQLBuilder) QueryExecutor { + query, args, err := b.ToSQL() + return newQueryExecutor(qs.de, err, query, args...) 
+} diff --git a/vendor/github.com/doug-martin/goqu/v9/exec/scanner.go b/vendor/github.com/doug-martin/goqu/v9/exec/scanner.go new file mode 100644 index 000000000..c05411b9f --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exec/scanner.go @@ -0,0 +1,168 @@ +package exec + +import ( + "database/sql" + "reflect" + + "github.com/doug-martin/goqu/v9/exp" + "github.com/doug-martin/goqu/v9/internal/errors" + "github.com/doug-martin/goqu/v9/internal/util" +) + +type ( + // Scanner knows how to scan sql.Rows into structs. + Scanner interface { + Next() bool + ScanStruct(i interface{}) error + ScanStructs(i interface{}) error + ScanVal(i interface{}) error + ScanVals(i interface{}) error + Close() error + Err() error + } + + scanner struct { + rows *sql.Rows + columnMap util.ColumnMap + columns []string + } +) + +func unableToFindFieldError(col string) error { + return errors.New(`unable to find corresponding field to column "%s" returned by query`, col) +} + +// NewScanner returns a scanner that can be used for scanning rows into structs. +func NewScanner(rows *sql.Rows) Scanner { + return &scanner{rows: rows} +} + +// Next prepares the next row for Scanning. See sql.Rows#Next for more +// information. +func (s *scanner) Next() bool { + return s.rows.Next() +} + +// Err returns the error, if any that was encountered during iteration. See +// sql.Rows#Err for more information. +func (s *scanner) Err() error { + return s.rows.Err() +} + +// ScanStruct will scan the current row into i. +func (s *scanner) ScanStruct(i interface{}) error { + // Setup columnMap and columns, but only once. + if s.columnMap == nil || s.columns == nil { + cm, err := util.GetColumnMap(i) + if err != nil { + return err + } + + cols, err := s.rows.Columns() + if err != nil { + return err + } + + s.columnMap = cm + s.columns = cols + } + + scans := make([]interface{}, 0, len(s.columns)) + for _, col := range s.columns { + data, ok := s.columnMap[col] + switch { + case !ok: + return unableToFindFieldError(col) + default: + scans = append(scans, reflect.New(data.GoType).Interface()) + } + } + + if err := s.rows.Scan(scans...); err != nil { + return err + } + + record := exp.Record{} + for index, col := range s.columns { + record[col] = scans[index] + } + + util.AssignStructVals(i, record, s.columnMap) + + return s.Err() +} + +// ScanStructs scans results in slice of structs +func (s *scanner) ScanStructs(i interface{}) error { + val, err := checkScanStructsTarget(i) + if err != nil { + return err + } + return s.scanIntoSlice(val, func(i interface{}) error { + return s.ScanStruct(i) + }) +} + +// ScanVal will scan the current row and column into i. +func (s *scanner) ScanVal(i interface{}) error { + if err := s.rows.Scan(i); err != nil { + return err + } + + return s.Err() +} + +// ScanStructs scans results in slice of values +func (s *scanner) ScanVals(i interface{}) error { + val, err := checkScanValsTarget(i) + if err != nil { + return err + } + return s.scanIntoSlice(val, func(i interface{}) error { + return s.ScanVal(i) + }) +} + +// Close closes the Rows, preventing further enumeration. See sql.Rows#Close +// for more info. 
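// A manual-iteration sketch against the Scanner contract above (illustrative:
// assumes a goqu.Database named db and the User struct from the exec docs;
// Executor() is the dataset method that yields a QueryExecutor):
//	sc, err := db.From("users").Executor().Scanner()
//	if err != nil {
//		// handle error
//	}
//	defer sc.Close()
//	for sc.Next() {
//		var u User
//		if err := sc.ScanStruct(&u); err != nil {
//			// handle error
//		}
//	}
//	if err := sc.Err(); err != nil {
//		// handle error
//	}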
+func (s *scanner) Close() error { + return s.rows.Close() +} + +func (s *scanner) scanIntoSlice(val reflect.Value, it func(i interface{}) error) error { + elemType := util.GetSliceElementType(val) + + for s.Next() { + row := reflect.New(elemType) + if rowErr := it(row.Interface()); rowErr != nil { + return rowErr + } + util.AppendSliceElement(val, row) + } + + return s.Err() +} + +func checkScanStructsTarget(i interface{}) (reflect.Value, error) { + val := reflect.ValueOf(i) + if !util.IsPointer(val.Kind()) { + return val, errUnsupportedScanStructsType + } + val = reflect.Indirect(val) + if !util.IsSlice(val.Kind()) { + return val, errUnsupportedScanStructsType + } + return val, nil +} + +func checkScanValsTarget(i interface{}) (reflect.Value, error) { + val := reflect.ValueOf(i) + if !util.IsPointer(val.Kind()) { + return val, errUnsupportedScanValsType + } + val = reflect.Indirect(val) + if !util.IsSlice(val.Kind()) { + return val, errUnsupportedScanValsType + } + return val, nil +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/alias.go b/vendor/github.com/doug-martin/goqu/v9/exp/alias.go new file mode 100644 index 000000000..2640627b8 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/alias.go @@ -0,0 +1,59 @@ +package exp + +import "fmt" + +type ( + aliasExpression struct { + aliased Expression + alias IdentifierExpression + } +) + +// Creates a new AliasedExpression for the Expression and alias +func NewAliasExpression(exp Expression, alias interface{}) AliasedExpression { + switch v := alias.(type) { + case string: + return aliasExpression{aliased: exp, alias: ParseIdentifier(v)} + case IdentifierExpression: + return aliasExpression{aliased: exp, alias: v} + default: + panic(fmt.Sprintf("Cannot create alias from %+v", v)) + } +} + +func (ae aliasExpression) Clone() Expression { + return NewAliasExpression(ae.aliased, ae.alias.Clone()) +} + +func (ae aliasExpression) Expression() Expression { + return ae +} + +func (ae aliasExpression) Aliased() Expression { + return ae.aliased +} + +func (ae aliasExpression) GetAs() IdentifierExpression { + return ae.alias +} + +// Returns a new IdentifierExpression with the specified schema +func (ae aliasExpression) Schema(schema string) IdentifierExpression { + return ae.alias.Schema(schema) +} + +// Returns a new IdentifierExpression with the specified table +func (ae aliasExpression) Table(table string) IdentifierExpression { + return ae.alias.Table(table) +} + +// Returns a new IdentifierExpression with the specified column +func (ae aliasExpression) Col(col interface{}) IdentifierExpression { + return ae.alias.Col(col) +} + +// Returns a new IdentifierExpression with the column set to * +// I("my_table").As("t").All() //"t".* +func (ae aliasExpression) All() IdentifierExpression { + return ae.alias.All() +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/bitwise.go b/vendor/github.com/doug-martin/goqu/v9/exp/bitwise.go new file mode 100644 index 000000000..eede5a2f9 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/bitwise.go @@ -0,0 +1,89 @@ +package exp + +type bitwise struct { + lhs Expression + rhs interface{} + op BitwiseOperation +} + +func NewBitwiseExpression(op BitwiseOperation, lhs Expression, rhs interface{}) BitwiseExpression { + return bitwise{op: op, lhs: lhs, rhs: rhs} +} + +func (b bitwise) Clone() Expression { + return NewBitwiseExpression(b.op, b.lhs.Clone(), b.rhs) +} + +func (b bitwise) RHS() interface{} { + return b.rhs +} + +func (b bitwise) LHS() Expression { + return b.lhs +} + 
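// The package-private constructors at the bottom of this file are surfaced
// through the Bitwiseable methods on identifiers; a sketch of the SQL they
// render (illustrative, dialect permitting):
//	goqu.I("flags").BitwiseAnd(4)       // ("flags" & 4)
//	goqu.I("flags").BitwiseOr(8)        // ("flags" | 8)
//	goqu.I("flags").BitwiseLeftShift(2) // ("flags" << 2)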
+func (b bitwise) Op() BitwiseOperation { + return b.op +} + +func (b bitwise) Expression() Expression { return b } +func (b bitwise) As(val interface{}) AliasedExpression { return NewAliasExpression(b, val) } +func (b bitwise) Eq(val interface{}) BooleanExpression { return eq(b, val) } +func (b bitwise) Neq(val interface{}) BooleanExpression { return neq(b, val) } +func (b bitwise) Gt(val interface{}) BooleanExpression { return gt(b, val) } +func (b bitwise) Gte(val interface{}) BooleanExpression { return gte(b, val) } +func (b bitwise) Lt(val interface{}) BooleanExpression { return lt(b, val) } +func (b bitwise) Lte(val interface{}) BooleanExpression { return lte(b, val) } +func (b bitwise) Asc() OrderedExpression { return asc(b) } +func (b bitwise) Desc() OrderedExpression { return desc(b) } +func (b bitwise) Like(i interface{}) BooleanExpression { return like(b, i) } +func (b bitwise) NotLike(i interface{}) BooleanExpression { return notLike(b, i) } +func (b bitwise) ILike(i interface{}) BooleanExpression { return iLike(b, i) } +func (b bitwise) NotILike(i interface{}) BooleanExpression { return notILike(b, i) } +func (b bitwise) RegexpLike(val interface{}) BooleanExpression { return regexpLike(b, val) } +func (b bitwise) RegexpNotLike(val interface{}) BooleanExpression { return regexpNotLike(b, val) } +func (b bitwise) RegexpILike(val interface{}) BooleanExpression { return regexpILike(b, val) } +func (b bitwise) RegexpNotILike(val interface{}) BooleanExpression { return regexpNotILike(b, val) } +func (b bitwise) In(i ...interface{}) BooleanExpression { return in(b, i...) } +func (b bitwise) NotIn(i ...interface{}) BooleanExpression { return notIn(b, i...) } +func (b bitwise) Is(i interface{}) BooleanExpression { return is(b, i) } +func (b bitwise) IsNot(i interface{}) BooleanExpression { return isNot(b, i) } +func (b bitwise) IsNull() BooleanExpression { return is(b, nil) } +func (b bitwise) IsNotNull() BooleanExpression { return isNot(b, nil) } +func (b bitwise) IsTrue() BooleanExpression { return is(b, true) } +func (b bitwise) IsNotTrue() BooleanExpression { return isNot(b, true) } +func (b bitwise) IsFalse() BooleanExpression { return is(b, false) } +func (b bitwise) IsNotFalse() BooleanExpression { return isNot(b, false) } +func (b bitwise) Distinct() SQLFunctionExpression { return NewSQLFunctionExpression("DISTINCT", b) } +func (b bitwise) Between(val RangeVal) RangeExpression { return between(b, val) } +func (b bitwise) NotBetween(val RangeVal) RangeExpression { return notBetween(b, val) } + +// used internally to create a Bitwise Inversion BitwiseExpression +func bitwiseInversion(rhs Expression) BitwiseExpression { + return NewBitwiseExpression(BitwiseInversionOp, nil, rhs) +} + +// used internally to create a Bitwise OR BitwiseExpression +func bitwiseOr(lhs Expression, rhs interface{}) BitwiseExpression { + return NewBitwiseExpression(BitwiseOrOp, lhs, rhs) +} + +// used internally to create a Bitwise AND BitwiseExpression +func bitwiseAnd(lhs Expression, rhs interface{}) BitwiseExpression { + return NewBitwiseExpression(BitwiseAndOp, lhs, rhs) +} + +// used internally to create a Bitwise XOR BitwiseExpression +func bitwiseXor(lhs Expression, rhs interface{}) BitwiseExpression { + return NewBitwiseExpression(BitwiseXorOp, lhs, rhs) +} + +// used internally to create a Bitwise LEFT SHIFT BitwiseExpression +func bitwiseLeftShift(lhs Expression, rhs interface{}) BitwiseExpression { + return NewBitwiseExpression(BitwiseLeftShiftOp, lhs, rhs) +} + +// used internally to create a 
Bitwise RIGHT SHIFT BitwiseExpression +func bitwiseRightShift(lhs Expression, rhs interface{}) BitwiseExpression { + return NewBitwiseExpression(BitwiseRightShiftOp, lhs, rhs) +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/bool.go b/vendor/github.com/doug-martin/goqu/v9/exp/bool.go new file mode 100644 index 000000000..a38f356ff --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/bool.go @@ -0,0 +1,185 @@ +package exp + +import ( + "reflect" + "regexp" +) + +type boolean struct { + lhs Expression + rhs interface{} + op BooleanOperation +} + +func NewBooleanExpression(op BooleanOperation, lhs Expression, rhs interface{}) BooleanExpression { + return boolean{op: op, lhs: lhs, rhs: rhs} +} + +func (b boolean) Clone() Expression { + return NewBooleanExpression(b.op, b.lhs.Clone(), b.rhs) +} + +func (b boolean) Expression() Expression { + return b +} + +func (b boolean) RHS() interface{} { + return b.rhs +} + +func (b boolean) LHS() Expression { + return b.lhs +} + +func (b boolean) Op() BooleanOperation { + return b.op +} + +func (b boolean) As(val interface{}) AliasedExpression { + return NewAliasExpression(b, val) +} + +// used internally to create an equality BooleanExpression +func eq(lhs Expression, rhs interface{}) BooleanExpression { + return checkBoolExpType(EqOp, lhs, rhs, false) +} + +// used internally to create an in-equality BooleanExpression +func neq(lhs Expression, rhs interface{}) BooleanExpression { + return checkBoolExpType(EqOp, lhs, rhs, true) +} + +// used internally to create an gt comparison BooleanExpression +func gt(lhs Expression, rhs interface{}) BooleanExpression { + return NewBooleanExpression(GtOp, lhs, rhs) +} + +// used internally to create an gte comparison BooleanExpression +func gte(lhs Expression, rhs interface{}) BooleanExpression { + return NewBooleanExpression(GteOp, lhs, rhs) +} + +// used internally to create an lt comparison BooleanExpression +func lt(lhs Expression, rhs interface{}) BooleanExpression { + return NewBooleanExpression(LtOp, lhs, rhs) +} + +// used internally to create an lte comparison BooleanExpression +func lte(lhs Expression, rhs interface{}) BooleanExpression { + return NewBooleanExpression(LteOp, lhs, rhs) +} + +// used internally to create an IN BooleanExpression +func in(lhs Expression, vals ...interface{}) BooleanExpression { + if len(vals) == 1 && reflect.Indirect(reflect.ValueOf(vals[0])).Kind() == reflect.Slice { + return NewBooleanExpression(InOp, lhs, vals[0]) + } + return NewBooleanExpression(InOp, lhs, vals) +} + +// used internally to create a NOT IN BooleanExpression +func notIn(lhs Expression, vals ...interface{}) BooleanExpression { + if len(vals) == 1 && reflect.Indirect(reflect.ValueOf(vals[0])).Kind() == reflect.Slice { + return NewBooleanExpression(NotInOp, lhs, vals[0]) + } + return NewBooleanExpression(NotInOp, lhs, vals) +} + +// used internally to create an IS BooleanExpression +func is(lhs Expression, val interface{}) BooleanExpression { + return checkBoolExpType(IsOp, lhs, val, false) +} + +// used internally to create an IS NOT BooleanExpression +func isNot(lhs Expression, val interface{}) BooleanExpression { + return checkBoolExpType(IsOp, lhs, val, true) +} + +// used internally to create a LIKE BooleanExpression +func like(lhs Expression, val interface{}) BooleanExpression { + return checkLikeExp(LikeOp, lhs, val, false) +} + +// used internally to create an ILIKE BooleanExpression +func iLike(lhs Expression, val interface{}) BooleanExpression { + return checkLikeExp(ILikeOp, lhs, 
val, false) +} + +// used internally to create a NOT LIKE BooleanExpression +func notLike(lhs Expression, val interface{}) BooleanExpression { + return checkLikeExp(LikeOp, lhs, val, true) +} + +// used internally to create a NOT ILIKE BooleanExpression +func notILike(lhs Expression, val interface{}) BooleanExpression { + return checkLikeExp(ILikeOp, lhs, val, true) +} + +// used internally to create a LIKE BooleanExpression +func regexpLike(lhs Expression, val interface{}) BooleanExpression { + return checkLikeExp(RegexpLikeOp, lhs, val, false) +} + +// used internally to create an ILIKE BooleanExpression +func regexpILike(lhs Expression, val interface{}) BooleanExpression { + return checkLikeExp(RegexpILikeOp, lhs, val, false) +} + +// used internally to create a NOT LIKE BooleanExpression +func regexpNotLike(lhs Expression, val interface{}) BooleanExpression { + return checkLikeExp(RegexpLikeOp, lhs, val, true) +} + +// used internally to create a NOT ILIKE BooleanExpression +func regexpNotILike(lhs Expression, val interface{}) BooleanExpression { + return checkLikeExp(RegexpILikeOp, lhs, val, true) +} + +// checks an like rhs to create the proper like expression for strings or regexps +func checkLikeExp(op BooleanOperation, lhs Expression, val interface{}, invert bool) BooleanExpression { + rhs := val + + if t, ok := val.(*regexp.Regexp); ok { + if op == LikeOp { + op = RegexpLikeOp + } else if op == ILikeOp { + op = RegexpILikeOp + } + rhs = t.String() + } + if invert { + op = operatorInversions[op] + } + return NewBooleanExpression(op, lhs, rhs) +} + +// checks a boolean operation normalizing the operation based on the RHS (e.g. "a" = true vs "a" IS TRUE +func checkBoolExpType(op BooleanOperation, lhs Expression, rhs interface{}, invert bool) BooleanExpression { + if rhs == nil { + op = IsOp + } else { + switch reflect.Indirect(reflect.ValueOf(rhs)).Kind() { + case reflect.Bool: + op = IsOp + case reflect.Slice: + // if its a slice of bytes dont treat as an IN + if _, ok := rhs.([]byte); !ok { + op = InOp + } + case reflect.Struct: + switch rhs.(type) { + case SQLExpression: + op = InOp + case AppendableExpression: + op = InOp + case *regexp.Regexp: + return checkLikeExp(LikeOp, lhs, rhs, invert) + } + default: + } + } + if invert { + op = operatorInversions[op] + } + return NewBooleanExpression(op, lhs, rhs) +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/case.go b/vendor/github.com/doug-martin/goqu/v9/exp/case.go new file mode 100644 index 000000000..d20800a38 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/case.go @@ -0,0 +1,78 @@ +package exp + +type ( + caseElse struct { + result interface{} + } + caseWhen struct { + caseElse + condition interface{} + } + caseExpression struct { + value interface{} + whens []CaseWhen + elseCondition CaseElse + } +) + +func NewCaseElse(result interface{}) CaseElse { + return caseElse{result: result} +} + +func (ce caseElse) Result() interface{} { + return ce.result +} + +func NewCaseWhen(condition, result interface{}) CaseWhen { + return caseWhen{caseElse: caseElse{result: result}, condition: condition} +} + +func (cw caseWhen) Condition() interface{} { + return cw.condition +} + +func NewCaseExpression() CaseExpression { + return caseExpression{value: nil, whens: []CaseWhen{}, elseCondition: nil} +} + +func (c caseExpression) Expression() Expression { + return c +} + +func (c caseExpression) Clone() Expression { + return caseExpression{value: c.value, whens: c.whens, elseCondition: c.elseCondition} +} + +func (c 
caseExpression) As(alias interface{}) AliasedExpression { + return NewAliasExpression(c, alias) +} + +func (c caseExpression) GetValue() interface{} { + return c.value +} + +func (c caseExpression) GetWhens() []CaseWhen { + return c.whens +} + +func (c caseExpression) GetElse() CaseElse { + return c.elseCondition +} + +func (c caseExpression) Value(value interface{}) CaseExpression { + c.value = value + return c +} + +func (c caseExpression) When(condition, result interface{}) CaseExpression { + c.whens = append(c.whens, NewCaseWhen(condition, result)) + return c +} + +func (c caseExpression) Else(result interface{}) CaseExpression { + c.elseCondition = NewCaseElse(result) + return c +} + +func (c caseExpression) Asc() OrderedExpression { return asc(c) } +func (c caseExpression) Desc() OrderedExpression { return desc(c) } diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/cast.go b/vendor/github.com/doug-martin/goqu/v9/exp/cast.go new file mode 100644 index 000000000..d881e26b2 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/cast.go @@ -0,0 +1,56 @@ +package exp + +type cast struct { + casted Expression + t LiteralExpression +} + +// Creates a new Casted expression +// Cast(I("a"), "NUMERIC") -> CAST("a" AS NUMERIC) +func NewCastExpression(e Expression, t string) CastExpression { + return cast{casted: e, t: NewLiteralExpression(t)} +} + +func (c cast) Casted() Expression { + return c.casted +} + +func (c cast) Type() LiteralExpression { + return c.t +} + +func (c cast) Clone() Expression { + return cast{casted: c.casted.Clone(), t: c.t} +} + +func (c cast) Expression() Expression { return c } +func (c cast) As(val interface{}) AliasedExpression { return NewAliasExpression(c, val) } +func (c cast) Eq(val interface{}) BooleanExpression { return eq(c, val) } +func (c cast) Neq(val interface{}) BooleanExpression { return neq(c, val) } +func (c cast) Gt(val interface{}) BooleanExpression { return gt(c, val) } +func (c cast) Gte(val interface{}) BooleanExpression { return gte(c, val) } +func (c cast) Lt(val interface{}) BooleanExpression { return lt(c, val) } +func (c cast) Lte(val interface{}) BooleanExpression { return lte(c, val) } +func (c cast) Asc() OrderedExpression { return asc(c) } +func (c cast) Desc() OrderedExpression { return desc(c) } +func (c cast) Like(i interface{}) BooleanExpression { return like(c, i) } +func (c cast) NotLike(i interface{}) BooleanExpression { return notLike(c, i) } +func (c cast) ILike(i interface{}) BooleanExpression { return iLike(c, i) } +func (c cast) NotILike(i interface{}) BooleanExpression { return notILike(c, i) } +func (c cast) RegexpLike(val interface{}) BooleanExpression { return regexpLike(c, val) } +func (c cast) RegexpNotLike(val interface{}) BooleanExpression { return regexpNotLike(c, val) } +func (c cast) RegexpILike(val interface{}) BooleanExpression { return regexpILike(c, val) } +func (c cast) RegexpNotILike(val interface{}) BooleanExpression { return regexpNotILike(c, val) } +func (c cast) In(i ...interface{}) BooleanExpression { return in(c, i...) } +func (c cast) NotIn(i ...interface{}) BooleanExpression { return notIn(c, i...) 
} +func (c cast) Is(i interface{}) BooleanExpression { return is(c, i) } +func (c cast) IsNot(i interface{}) BooleanExpression { return isNot(c, i) } +func (c cast) IsNull() BooleanExpression { return is(c, nil) } +func (c cast) IsNotNull() BooleanExpression { return isNot(c, nil) } +func (c cast) IsTrue() BooleanExpression { return is(c, true) } +func (c cast) IsNotTrue() BooleanExpression { return isNot(c, true) } +func (c cast) IsFalse() BooleanExpression { return is(c, false) } +func (c cast) IsNotFalse() BooleanExpression { return isNot(c, false) } +func (c cast) Distinct() SQLFunctionExpression { return NewSQLFunctionExpression("DISTINCT", c) } +func (c cast) Between(val RangeVal) RangeExpression { return between(c, val) } +func (c cast) NotBetween(val RangeVal) RangeExpression { return notBetween(c, val) } diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/col.go b/vendor/github.com/doug-martin/goqu/v9/exp/col.go new file mode 100644 index 000000000..042969177 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/col.go @@ -0,0 +1,84 @@ +package exp + +import ( + "fmt" + "reflect" + + "github.com/doug-martin/goqu/v9/internal/util" +) + +type columnList struct { + columns []Expression +} + +func NewColumnListExpression(vals ...interface{}) ColumnListExpression { + cols := []Expression{} + for _, val := range vals { + switch t := val.(type) { + case nil: // do nothing + case string: + cols = append(cols, ParseIdentifier(t)) + case ColumnListExpression: + cols = append(cols, t.Columns()...) + case Expression: + cols = append(cols, t) + default: + _, valKind := util.GetTypeInfo(val, reflect.Indirect(reflect.ValueOf(val))) + + if valKind == reflect.Struct { + cm, err := util.GetColumnMap(val) + if err != nil { + panic(err.Error()) + } + structCols := cm.Cols() + for _, col := range structCols { + i := ParseIdentifier(col) + var sc Expression = i + if i.IsQualified() { + sc = i.As(NewIdentifierExpression("", "", col)) + } + cols = append(cols, sc) + } + } else { + panic(fmt.Sprintf("Cannot created expression from %+v", val)) + } + } + } + return columnList{columns: cols} +} + +func NewOrderedColumnList(vals ...OrderedExpression) ColumnListExpression { + exps := make([]interface{}, 0, len(vals)) + for _, col := range vals { + exps = append(exps, col.Expression()) + } + return NewColumnListExpression(exps...) +} + +func (cl columnList) Clone() Expression { + newExps := make([]Expression, 0, len(cl.columns)) + for _, exp := range cl.columns { + newExps = append(newExps, exp.Clone()) + } + return columnList{columns: newExps} +} + +func (cl columnList) Expression() Expression { + return cl +} + +func (cl columnList) IsEmpty() bool { + return len(cl.columns) == 0 +} + +func (cl columnList) Columns() []Expression { + return cl.columns +} + +func (cl columnList) Append(cols ...Expression) ColumnListExpression { + ret := columnList{} + exps := append(ret.columns, cl.columns...) + exps = append(exps, cols...) 
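	// appending into a fresh slice keeps Append copy-on-write: the
	// receiver's existing column list is never mutated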
+ ret.columns = exps + return ret +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/compound.go b/vendor/github.com/doug-martin/goqu/v9/exp/compound.go new file mode 100644 index 000000000..d631991f0 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/compound.go @@ -0,0 +1,19 @@ +package exp + +type compound struct { + t CompoundType + rhs AppendableExpression +} + +func NewCompoundExpression(ct CompoundType, rhs AppendableExpression) CompoundExpression { + return compound{t: ct, rhs: rhs} +} + +func (c compound) Expression() Expression { return c } + +func (c compound) Clone() Expression { + return compound{t: c.t, rhs: c.rhs.Clone().(AppendableExpression)} +} + +func (c compound) Type() CompoundType { return c.t } +func (c compound) RHS() AppendableExpression { return c.rhs } diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/conflict.go b/vendor/github.com/doug-martin/goqu/v9/exp/conflict.go new file mode 100644 index 000000000..4c7c5ead0 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/conflict.go @@ -0,0 +1,86 @@ +package exp + +type ( + doNothingConflict struct{} + // ConflictUpdate is the struct that represents the UPDATE fragment of an + // INSERT ... ON CONFLICT/ON DUPLICATE KEY DO UPDATE statement + conflictUpdate struct { + target string + update interface{} + whereClause ExpressionList + } +) + +// Creates a conflict struct to be passed to InsertConflict to ignore constraint errors +// InsertConflict(DoNothing(),...) -> INSERT INTO ... ON CONFLICT DO NOTHING +func NewDoNothingConflictExpression() ConflictExpression { + return &doNothingConflict{} +} + +func (c doNothingConflict) Expression() Expression { + return c +} + +func (c doNothingConflict) Clone() Expression { + return c +} + +func (c doNothingConflict) Action() ConflictAction { + return DoNothingConflictAction +} + +// Creates a ConflictUpdate struct to be passed to InsertConflict +// Represents a ON CONFLICT DO UPDATE portion of an INSERT statement (ON DUPLICATE KEY UPDATE for mysql) +// +// InsertConflict(DoUpdate("target_column", update),...) -> +// INSERT INTO ... ON CONFLICT DO UPDATE SET a=b +// InsertConflict(DoUpdate("target_column", update).Where(Ex{"a": 1},...) -> +// INSERT INTO ... ON CONFLICT DO UPDATE SET a=b WHERE a=1 +func NewDoUpdateConflictExpression(target string, update interface{}) ConflictUpdateExpression { + return &conflictUpdate{target: target, update: update} +} + +func (c conflictUpdate) Expression() Expression { + return c +} + +func (c conflictUpdate) Clone() Expression { + return &conflictUpdate{ + target: c.target, + update: c.update, + whereClause: c.whereClause.Clone().(ExpressionList), + } +} + +func (c conflictUpdate) Action() ConflictAction { + return DoUpdateConflictAction +} + +// Returns the target conflict column. Only necessary for Postgres. +// Will return an error for mysql/sqlite. Will also return an error if missing from a postgres ConflictUpdate. +func (c conflictUpdate) TargetColumn() string { + return c.target +} + +// Returns the Updates which represent the ON CONFLICT DO UPDATE portion of an insert statement. If nil, +// there are no updates. +func (c conflictUpdate) Update() interface{} { + return c.update +} + +// Append to the existing Where clause for an ON CONFLICT DO UPDATE ... WHERE ... +// InsertConflict(DoNothing(),...) -> INSERT INTO ... 
ON CONFLICT DO NOTHING +func (c *conflictUpdate) Where(expressions ...Expression) ConflictUpdateExpression { + if c.whereClause == nil { + c.whereClause = NewExpressionList(AndType, expressions...) + } else { + c.whereClause = c.whereClause.Append(expressions...) + } + return c +} + +// Append to the existing Where clause for an ON CONFLICT DO UPDATE ... WHERE ... +// InsertConflict(DoNothing(),...) -> INSERT INTO ... ON CONFLICT DO NOTHING +func (c *conflictUpdate) WhereClause() ExpressionList { + return c.whereClause +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/cte.go b/vendor/github.com/doug-martin/goqu/v9/exp/cte.go new file mode 100644 index 000000000..4282d0b6b --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/cte.go @@ -0,0 +1,23 @@ +package exp + +type commonExpr struct { + recursive bool + name LiteralExpression + subQuery Expression +} + +// Creates a new WITH common table expression for a SQLExpression, typically Datasets'. This function is used +// internally by Dataset when a CTE is added to another Dataset +func NewCommonTableExpression(recursive bool, name string, subQuery Expression) CommonTableExpression { + return commonExpr{recursive: recursive, name: NewLiteralExpression(name), subQuery: subQuery} +} + +func (ce commonExpr) Expression() Expression { return ce } + +func (ce commonExpr) Clone() Expression { + return commonExpr{recursive: ce.recursive, name: ce.name, subQuery: ce.subQuery.Clone().(SQLExpression)} +} + +func (ce commonExpr) IsRecursive() bool { return ce.recursive } +func (ce commonExpr) Name() LiteralExpression { return ce.name } +func (ce commonExpr) SubQuery() Expression { return ce.subQuery } diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/delete_clauses.go b/vendor/github.com/doug-martin/goqu/v9/exp/delete_clauses.go new file mode 100644 index 000000000..25af851f3 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/delete_clauses.go @@ -0,0 +1,177 @@ +package exp + +type ( + DeleteClauses interface { + HasFrom() bool + clone() *deleteClauses + + CommonTables() []CommonTableExpression + CommonTablesAppend(cte CommonTableExpression) DeleteClauses + + From() IdentifierExpression + SetFrom(table IdentifierExpression) DeleteClauses + + Where() ExpressionList + ClearWhere() DeleteClauses + WhereAppend(expressions ...Expression) DeleteClauses + + Order() ColumnListExpression + HasOrder() bool + ClearOrder() DeleteClauses + SetOrder(oes ...OrderedExpression) DeleteClauses + OrderAppend(...OrderedExpression) DeleteClauses + OrderPrepend(...OrderedExpression) DeleteClauses + + Limit() interface{} + HasLimit() bool + ClearLimit() DeleteClauses + SetLimit(limit interface{}) DeleteClauses + + Returning() ColumnListExpression + HasReturning() bool + SetReturning(cl ColumnListExpression) DeleteClauses + } + deleteClauses struct { + commonTables []CommonTableExpression + from IdentifierExpression + where ExpressionList + order ColumnListExpression + limit interface{} + returning ColumnListExpression + } +) + +func NewDeleteClauses() DeleteClauses { + return &deleteClauses{} +} + +func (dc *deleteClauses) HasFrom() bool { + return dc.from != nil +} + +func (dc *deleteClauses) clone() *deleteClauses { + return &deleteClauses{ + commonTables: dc.commonTables, + from: dc.from, + + where: dc.where, + order: dc.order, + limit: dc.limit, + returning: dc.returning, + } +} + +func (dc *deleteClauses) CommonTables() []CommonTableExpression { + return dc.commonTables +} + +func (dc *deleteClauses) CommonTablesAppend(cte 
CommonTableExpression) DeleteClauses { + ret := dc.clone() + ret.commonTables = append(ret.commonTables, cte) + return ret +} + +func (dc *deleteClauses) From() IdentifierExpression { + return dc.from +} + +func (dc *deleteClauses) SetFrom(table IdentifierExpression) DeleteClauses { + ret := dc.clone() + ret.from = table + return ret +} + +func (dc *deleteClauses) Where() ExpressionList { + return dc.where +} + +func (dc *deleteClauses) ClearWhere() DeleteClauses { + ret := dc.clone() + ret.where = nil + return ret +} + +func (dc *deleteClauses) WhereAppend(expressions ...Expression) DeleteClauses { + if len(expressions) == 0 { + return dc + } + ret := dc.clone() + if ret.where == nil { + ret.where = NewExpressionList(AndType, expressions...) + } else { + ret.where = ret.where.Append(expressions...) + } + return ret +} + +func (dc *deleteClauses) Order() ColumnListExpression { + return dc.order +} + +func (dc *deleteClauses) HasOrder() bool { + return dc.order != nil +} + +func (dc *deleteClauses) ClearOrder() DeleteClauses { + ret := dc.clone() + ret.order = nil + return ret +} + +func (dc *deleteClauses) SetOrder(oes ...OrderedExpression) DeleteClauses { + ret := dc.clone() + ret.order = NewOrderedColumnList(oes...) + return ret +} + +func (dc *deleteClauses) OrderAppend(oes ...OrderedExpression) DeleteClauses { + if dc.order == nil { + return dc.SetOrder(oes...) + } + ret := dc.clone() + ret.order = ret.order.Append(NewOrderedColumnList(oes...).Columns()...) + return ret +} + +func (dc *deleteClauses) OrderPrepend(oes ...OrderedExpression) DeleteClauses { + if dc.order == nil { + return dc.SetOrder(oes...) + } + ret := dc.clone() + ret.order = NewOrderedColumnList(oes...).Append(ret.order.Columns()...) + return ret +} + +func (dc *deleteClauses) Limit() interface{} { + return dc.limit +} + +func (dc *deleteClauses) HasLimit() bool { + return dc.limit != nil +} + +func (dc *deleteClauses) ClearLimit() DeleteClauses { + ret := dc.clone() + ret.limit = nil + return ret +} + +func (dc *deleteClauses) SetLimit(limit interface{}) DeleteClauses { + ret := dc.clone() + ret.limit = limit + return ret +} + +func (dc *deleteClauses) Returning() ColumnListExpression { + return dc.returning +} + +func (dc *deleteClauses) HasReturning() bool { + return dc.returning != nil && !dc.returning.IsEmpty() +} + +func (dc *deleteClauses) SetReturning(cl ColumnListExpression) DeleteClauses { + ret := dc.clone() + ret.returning = cl + return ret +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/exp.go b/vendor/github.com/doug-martin/goqu/v9/exp/exp.go new file mode 100644 index 000000000..240a96a62 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/exp.go @@ -0,0 +1,734 @@ +package exp + +import ( + "fmt" + + "github.com/doug-martin/goqu/v9/internal/sb" +) + +// Behaviors +type ( + + // Interface that an expression should implement if it can be aliased. + Aliaseable interface { + // Returns an AliasedExpression + // I("col").As("other_col") //"col" AS "other_col" + // I("col").As(I("other_col")) //"col" AS "other_col" + As(interface{}) AliasedExpression + } + + // Interface that an expression should implement if it can be casted to another SQL type . 
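	// A usage sketch (column and type names are illustrative):
	//	ds.Where(goqu.C("amount").Cast("NUMERIC").Gt(100))
	//	// WHERE (CAST("amount" AS NUMERIC) > 100)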
+ Castable interface { + // Casts an expression to the specified type + // I("a").Cast("numeric")//CAST("a" AS numeric) + Cast(val string) CastExpression + } + + Inable interface { + // Creates a Boolean expression for IN clauses + // I("col").In([]string{"a", "b", "c"}) //("col" IN ('a', 'b', 'c')) + In(...interface{}) BooleanExpression + // Creates a Boolean expression for NOT IN clauses + // I("col").NotIn([]string{"a", "b", "c"}) //("col" NOT IN ('a', 'b', 'c')) + NotIn(...interface{}) BooleanExpression + } + + Isable interface { + // Creates an Boolean expression IS clauses + // ds.Where(I("a").Is(nil)) //("a" IS NULL) + // ds.Where(I("a").Is(true)) //("a" IS TRUE) + // ds.Where(I("a").Is(false)) //("a" IS FALSE) + Is(interface{}) BooleanExpression + // Creates an Boolean expression IS NOT clauses + // ds.Where(I("a").IsNot(nil)) //("a" IS NOT NULL) + // ds.Where(I("a").IsNot(true)) //("a" IS NOT TRUE) + // ds.Where(I("a").IsNot(false)) //("a" IS NOT FALSE) + IsNot(interface{}) BooleanExpression + // Shortcut for Is(nil) + IsNull() BooleanExpression + // Shortcut for IsNot(nil) + IsNotNull() BooleanExpression + // Shortcut for Is(true) + IsTrue() BooleanExpression + // Shortcut for IsNot(true) + IsNotTrue() BooleanExpression + // Shortcut for Is(false) + IsFalse() BooleanExpression + // Shortcut for IsNot(false) + IsNotFalse() BooleanExpression + } + + Likeable interface { + // Creates an Boolean expression for LIKE clauses + // ds.Where(I("a").Like("a%")) //("a" LIKE 'a%') + Like(interface{}) BooleanExpression + // Creates an Boolean expression for NOT LIKE clauses + // ds.Where(I("a").NotLike("a%")) //("a" NOT LIKE 'a%') + NotLike(interface{}) BooleanExpression + // Creates an Boolean expression for case insensitive LIKE clauses + // ds.Where(I("a").ILike("a%")) //("a" ILIKE 'a%') + ILike(interface{}) BooleanExpression + // Creates an Boolean expression for case insensitive NOT LIKE clauses + // ds.Where(I("a").NotILike("a%")) //("a" NOT ILIKE 'a%') + NotILike(interface{}) BooleanExpression + + // Creates an Boolean expression for REGEXP LIKE clauses + // ds.Where(I("a").RegexpLike("a%")) //("a" ~ 'a%') + RegexpLike(interface{}) BooleanExpression + // Creates an Boolean expression for REGEXP NOT LIKE clauses + // ds.Where(I("a").RegexpNotLike("a%")) //("a" !~ 'a%') + RegexpNotLike(interface{}) BooleanExpression + // Creates an Boolean expression for case insensitive REGEXP ILIKE clauses + // ds.Where(I("a").RegexpILike("a%")) //("a" ~* 'a%') + RegexpILike(interface{}) BooleanExpression + // Creates an Boolean expression for case insensitive REGEXP NOT ILIKE clauses + // ds.Where(I("a").RegexpNotILike("a%")) //("a" !~* 'a%') + RegexpNotILike(interface{}) BooleanExpression + } + + // Interface that an expression should implement if it can be compared with other values. 
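	// Comparisons compose directly into datasets; multiple expressions
	// passed to Where are ANDed together:
	//	ds.Where(goqu.C("age").Gte(18), goqu.C("age").Lt(65))
	//	// WHERE (("age" >= 18) AND ("age" < 65))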
+ Comparable interface { + // Creates a Boolean expression comparing equality + // I("col").Eq(1) //("col" = 1) + Eq(interface{}) BooleanExpression + // Creates a Boolean expression comparing in-equality + // I("col").Neq(1) //("col" != 1) + Neq(interface{}) BooleanExpression + // Creates a Boolean expression for greater than comparisons + // I("col").Gt(1) //("col" > 1) + Gt(interface{}) BooleanExpression + // Creates a Boolean expression for greater than or equal to than comparisons + // I("col").Gte(1) //("col" >= 1) + Gte(interface{}) BooleanExpression + // Creates a Boolean expression for less than comparisons + // I("col").Lt(1) //("col" < 1) + Lt(interface{}) BooleanExpression + // Creates a Boolean expression for less than or equal to comparisons + // I("col").Lte(1) //("col" <= 1) + Lte(interface{}) BooleanExpression + } + + // Interface that an expression should implement if it can be used in a DISTINCT epxression. + Distinctable interface { + // Creates a DISTINCT clause + // I("a").Distinct() //DISTINCT("a") + Distinct() SQLFunctionExpression + } + + // Interface that an expression should implement if it can be ORDERED. + Orderable interface { + // Creates an Ordered Expression for sql ASC order + // ds.Order(I("a").Asc()) //ORDER BY "a" ASC + Asc() OrderedExpression + // Creates an Ordered Expression for sql DESC order + // ds.Order(I("a").Desc()) //ORDER BY "a" DESC + Desc() OrderedExpression + } + + Rangeable interface { + // Creates a Range expression for between comparisons + // I("col").Between(RangeVal{Start:1, End:10}) //("col" BETWEEN 1 AND 10) + Between(RangeVal) RangeExpression + // Creates a Range expression for between comparisons + // I("col").NotBetween(RangeVal{Start:1, End:10}) //("col" NOT BETWEEN 1 AND 10) + NotBetween(RangeVal) RangeExpression + } + + Updateable interface { + // Used internally by update sql + Set(interface{}) UpdateExpression + } + + Bitwiseable interface { + // Creates a Bit Operation Expresion for sql ~ + // I("col").BitiInversion() // (~ "col") + BitwiseInversion() BitwiseExpression + // Creates a Bit Operation Expresion for sql | + // I("col").BitOr(1) // ("col" | 1) + BitwiseOr(interface{}) BitwiseExpression + // Creates a Bit Operation Expresion for sql & + // I("col").BitAnd(1) // ("col" & 1) + BitwiseAnd(interface{}) BitwiseExpression + // Creates a Bit Operation Expresion for sql ^ + // I("col").BitXor(1) // ("col" ^ 1) + BitwiseXor(interface{}) BitwiseExpression + // Creates a Bit Operation Expresion for sql << + // I("col").BitLeftShift(1) // ("col" << 1) + BitwiseLeftShift(interface{}) BitwiseExpression + // Creates a Bit Operation Expresion for sql >> + // I("col").BitRighttShift(1) // ("col" >> 1) + BitwiseRightShift(interface{}) BitwiseExpression + } +) + +type ( + Vals []interface{} + // Parent of all expression types + Expression interface { + Clone() Expression + Expression() Expression + } + // An Expression that generates its own sql (e.g Dataset) + SQLExpression interface { + Expression + ToSQL() (string, []interface{}, error) + IsPrepared() bool + } + + AppendableExpression interface { + Expression + AppendSQL(b sb.SQLBuilder) + // Returns the alias value as an identiier expression + GetAs() IdentifierExpression + + // Returns true if this expression returns columns. 
+ // Used to determine if a Select, Update, Insert, or Delete query returns columns + ReturnsColumns() bool + } + // Expression for Aliased expressions + // I("a").As("b") -> "a" AS "b" + // SUM("a").As(I("a_sum")) -> SUM("a") AS "a_sum" + AliasedExpression interface { + Expression + // Returns the Epxression being aliased + Aliased() Expression + // Returns the alias value as an identiier expression + GetAs() IdentifierExpression + + // Returns a new IdentifierExpression with the specified schema + Schema(string) IdentifierExpression + // Returns a new IdentifierExpression with the specified table + Table(string) IdentifierExpression + // Returns a new IdentifierExpression with the specified column + Col(interface{}) IdentifierExpression + // Returns a new IdentifierExpression with the column set to * + // I("my_table").All() //"my_table".* + All() IdentifierExpression + } + + BooleanOperation int + BooleanExpression interface { + Expression + Aliaseable + // Returns the operator for the expression + Op() BooleanOperation + // The left hand side of the expression (e.g. I("a") + LHS() Expression + // The right hand side of the expression could be a primitive value, dataset, or expression + RHS() interface{} + } + + BitwiseOperation int + BitwiseExpression interface { + Expression + Aliaseable + Comparable + Isable + Inable + Likeable + Rangeable + Orderable + Distinctable + // Returns the operator for the expression + Op() BitwiseOperation + // The left hand side of the expression (e.g. I("a") + LHS() Expression + // The right hand side of the expression could be a primitive value, dataset, or expression + RHS() interface{} + } + + // An Expression that represents another Expression casted to a SQL type + CastExpression interface { + Expression + Aliaseable + Comparable + Inable + Isable + Likeable + Orderable + Distinctable + Rangeable + // The exression being casted + Casted() Expression + // The the SQL type to cast the expression to + Type() LiteralExpression + } + // A list of columns. Typically used internally by Select, Order, From + ColumnListExpression interface { + Expression + // Returns the list of columns + Columns() []Expression + // Returns true if the column list is empty + IsEmpty() bool + // Returns a new ColumnListExpression with the columns appended. + Append(...Expression) ColumnListExpression + } + CompoundType int + CompoundExpression interface { + Expression + Type() CompoundType + RHS() AppendableExpression + } + // An Expression that the ON CONFLICT/ON DUPLICATE KEY portion of an INSERT statement + ConflictAction int + ConflictExpression interface { + Expression + Action() ConflictAction + } + ConflictUpdateExpression interface { + ConflictExpression + TargetColumn() string + Where(expressions ...Expression) ConflictUpdateExpression + WhereClause() ExpressionList + Update() interface{} + } + CommonTableExpression interface { + Expression + IsRecursive() bool + // Returns the alias name for the extracted expression + Name() LiteralExpression + // Returns the Expression being extracted + SubQuery() Expression + } + ExpressionListType int + // A list of expressions that should be joined together + // And(I("a").Eq(10), I("b").Eq(11)) //(("a" = 10) AND ("b" = 11)) + // Or(I("a").Eq(10), I("b").Eq(11)) //(("a" = 10) OR ("b" = 11)) + ExpressionList interface { + Expression + // Returns type (e.g. 
OR, AND) + Type() ExpressionListType + // Slice of expressions that should be joined together + Expressions() []Expression + // Returns a new expression list with the given expressions appended to the current Expressions list + Append(...Expression) ExpressionList + + IsEmpty() bool + } + // An Identifier that can contain schema, table and column identifiers + IdentifierExpression interface { + Expression + Aliaseable + Comparable + Inable + Isable + Likeable + Rangeable + Orderable + Updateable + Distinctable + Castable + Bitwiseable + // returns true if this identifier has more more than on part (Schema, Table or Col) + // "schema" -> true //cant qualify anymore + // "schema.table" -> true + // "table" -> false + // "schema"."table"."col" -> true + // "table"."col" -> true + // "col" -> false + IsQualified() bool + // Returns a new IdentifierExpression with the specified schema + Schema(string) IdentifierExpression + // Returns the current schema + GetSchema() string + // Returns a new IdentifierExpression with the specified table + Table(string) IdentifierExpression + // Returns the current table + GetTable() string + // Returns a new IdentifierExpression with the specified column + Col(interface{}) IdentifierExpression + // Returns the current column + GetCol() interface{} + // Returns a new IdentifierExpression with the column set to * + // I("my_table").All() //"my_table".* + All() IdentifierExpression + + // Returns true if schema table and identifier are all zero values. + IsEmpty() bool + } + InsertExpression interface { + Expression + IsEmpty() bool + IsInsertFrom() bool + From() AppendableExpression + Cols() ColumnListExpression + SetCols(cols ColumnListExpression) InsertExpression + Vals() [][]interface{} + SetVals([][]interface{}) InsertExpression + } + + JoinType int + JoinExpression interface { + Expression + JoinType() JoinType + IsConditioned() bool + Table() Expression + } + // Parent type for join expressions + ConditionedJoinExpression interface { + JoinExpression + Condition() JoinCondition + IsConditionEmpty() bool + } + LateralExpression interface { + Expression + Aliaseable + Table() AppendableExpression + } + + // Expression for representing "literal" sql. + // L("col = 1") -> col = 1) + // L("? = ?", I("col"), 1) -> "col" = 1 + LiteralExpression interface { + Expression + Aliaseable + Comparable + Isable + Inable + Likeable + Rangeable + Orderable + Bitwiseable + // Returns the literal sql + Literal() string + // Arguments to be replaced within the sql + Args() []interface{} + } + + NullSortType int + SortDirection int + // An expression for specifying sort order and options + OrderedExpression interface { + Expression + // The expression being sorted + SortExpression() Expression + // Sort direction (e.g. ASC, DESC) + IsAsc() bool + // If the adapter supports it null sort type (e.g. NULLS FIRST, NULLS LAST) + NullSortType() NullSortType + // Returns a new OrderedExpression with NullSortType set to NULLS_FIRST + NullsFirst() OrderedExpression + // Returns a new OrderedExpression with NullSortType set to NULLS_LAST + NullsLast() OrderedExpression + } + + RangeOperation int + RangeExpression interface { + Expression + // Returns the operator for the expression + Op() RangeOperation + // The left hand side of the expression (e.g. 
I("a") + LHS() Expression + // The right hand side of the expression could be a primitive value, dataset, or expression + RHS() RangeVal + } + RangeVal interface { + Start() interface{} + End() interface{} + } + + Windowable interface { + Over(WindowExpression) SQLWindowFunctionExpression + OverName(IdentifierExpression) SQLWindowFunctionExpression + } + + // Expression for representing a SQLFunction(e.g. COUNT, SUM, MIN, MAX...) + SQLFunctionExpression interface { + Expression + Aliaseable + Rangeable + Comparable + Orderable + Isable + Inable + Likeable + Windowable + // The function name + Name() string + // Arguments to be passed to the function + Args() []interface{} + } + + UpdateExpression interface { + Col() IdentifierExpression + Val() interface{} + } + + SQLWindowFunctionExpression interface { + Expression + Aliaseable + Rangeable + Comparable + Orderable + Isable + Inable + Likeable + Func() SQLFunctionExpression + + Window() WindowExpression + WindowName() IdentifierExpression + + HasWindow() bool + HasWindowName() bool + } + + WindowExpression interface { + Expression + + Name() IdentifierExpression + HasName() bool + + Parent() IdentifierExpression + HasParent() bool + PartitionCols() ColumnListExpression + HasPartitionBy() bool + OrderCols() ColumnListExpression + HasOrder() bool + + Inherit(parent string) WindowExpression + PartitionBy(cols ...interface{}) WindowExpression + OrderBy(cols ...interface{}) WindowExpression + } + CaseElse interface { + Result() interface{} + } + CaseWhen interface { + Condition() interface{} + Result() interface{} + } + CaseExpression interface { + Expression + Aliaseable + Orderable + GetValue() interface{} + GetWhens() []CaseWhen + GetElse() CaseElse + Value(val interface{}) CaseExpression + When(condition, result interface{}) CaseExpression + Else(result interface{}) CaseExpression + } +) + +const ( + UnionCompoundType CompoundType = iota + UnionAllCompoundType + IntersectCompoundType + IntersectAllCompoundType + + DoNothingConflictAction ConflictAction = iota + DoUpdateConflictAction + + AndType ExpressionListType = iota + OrType + + InnerJoinType JoinType = iota + FullOuterJoinType + RightOuterJoinType + LeftOuterJoinType + FullJoinType + RightJoinType + LeftJoinType + NaturalJoinType + NaturalLeftJoinType + NaturalRightJoinType + NaturalFullJoinType + CrossJoinType + + UsingJoinCondType JoinConditionType = iota + OnJoinCondType + + // Default null sort type with no null sort order + NoNullsSortType NullSortType = iota + // NULLS FIRST + NullsFirstSortType + // NULLS LAST + NullsLastSortType + // ASC + AscDir SortDirection = iota + // DESC + DescSortDir + + // BETWEEN + BetweenOp RangeOperation = iota + // NOT BETWEEN + NotBetweenOp + + // = + EqOp BooleanOperation = iota + // != or <> + NeqOp + // IS + IsOp + // IS NOT + IsNotOp + // > + GtOp + // >= + GteOp + // < + LtOp + // <= + LteOp + // IN + InOp + // NOT IN + NotInOp + // LIKE, LIKE BINARY... + LikeOp + // NOT LIKE, NOT LIKE BINARY... 
+ NotLikeOp + // ILIKE, LIKE + ILikeOp + // NOT ILIKE, NOT LIKE + NotILikeOp + // ~, REGEXP BINARY + RegexpLikeOp + // !~, NOT REGEXP BINARY + RegexpNotLikeOp + // ~*, REGEXP + RegexpILikeOp + // !~*, NOT REGEXP + RegexpNotILikeOp + + betweenStr = "between" + + BitwiseInversionOp BitwiseOperation = iota + BitwiseOrOp + BitwiseAndOp + BitwiseXorOp + BitwiseLeftShiftOp + BitwiseRightShiftOp +) + +var ( + ConditionedJoinTypes = map[JoinType]bool{ + InnerJoinType: true, + FullOuterJoinType: true, + RightOuterJoinType: true, + LeftOuterJoinType: true, + FullJoinType: true, + RightJoinType: true, + LeftJoinType: true, + } + // used internally for inverting operators + operatorInversions = map[BooleanOperation]BooleanOperation{ + IsOp: IsNotOp, + EqOp: NeqOp, + GtOp: LteOp, + GteOp: LtOp, + LtOp: GteOp, + LteOp: GtOp, + InOp: NotInOp, + LikeOp: NotLikeOp, + ILikeOp: NotILikeOp, + RegexpLikeOp: RegexpNotLikeOp, + RegexpILikeOp: RegexpNotILikeOp, + IsNotOp: IsOp, + NeqOp: EqOp, + NotInOp: InOp, + NotLikeOp: LikeOp, + NotILikeOp: ILikeOp, + RegexpNotLikeOp: RegexpLikeOp, + RegexpNotILikeOp: RegexpILikeOp, + } +) + +func (bo BooleanOperation) String() string { + switch bo { + case EqOp: + return "eq" + case NeqOp: + return "neq" + case IsOp: + return "is" + case IsNotOp: + return "isnot" + case GtOp: + return "gt" + case GteOp: + return "gte" + case LtOp: + return "lt" + case LteOp: + return "lte" + case InOp: + return "in" + case NotInOp: + return "notin" + case LikeOp: + return "like" + case NotLikeOp: + return "notlike" + case ILikeOp: + return "ilike" + case NotILikeOp: + return "notilike" + case RegexpLikeOp: + return "regexplike" + case RegexpNotLikeOp: + return "regexpnotlike" + case RegexpILikeOp: + return "regexpilike" + case RegexpNotILikeOp: + return "regexpnotilike" + } + return fmt.Sprintf("%d", bo) +} + +func (bi BitwiseOperation) String() string { + switch bi { + case BitwiseInversionOp: + return "Inversion" + case BitwiseOrOp: + return "OR" + case BitwiseAndOp: + return "AND" + case BitwiseXorOp: + return "XOR" + case BitwiseLeftShiftOp: + return "Left Shift" + case BitwiseRightShiftOp: + return "Right Shift" + } + return fmt.Sprintf("%d", bi) +} + +func (ro RangeOperation) String() string { + switch ro { + case BetweenOp: + return betweenStr + case NotBetweenOp: + return "not between" + } + return fmt.Sprintf("%d", ro) +} + +func (jt JoinType) String() string { + switch jt { + case InnerJoinType: + return "InnerJoinType" + case FullOuterJoinType: + return "FullOuterJoinType" + case RightOuterJoinType: + return "RightOuterJoinType" + case LeftOuterJoinType: + return "LeftOuterJoinType" + case FullJoinType: + return "FullJoinType" + case RightJoinType: + return "RightJoinType" + case LeftJoinType: + return "LeftJoinType" + case NaturalJoinType: + return "NaturalJoinType" + case NaturalLeftJoinType: + return "NaturalLeftJoinType" + case NaturalRightJoinType: + return "NaturalRightJoinType" + case NaturalFullJoinType: + return "NaturalFullJoinType" + case CrossJoinType: + return "CrossJoinType" + } + return fmt.Sprintf("%d", jt) +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/exp_list.go b/vendor/github.com/doug-martin/goqu/v9/exp/exp_list.go new file mode 100644 index 000000000..ff2bbc967 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/exp_list.go @@ -0,0 +1,66 @@ +package exp + +type ( + expressionList struct { + operator ExpressionListType + expressions []Expression + } +) + +// A list of expressions that should be ORed together +// Or(I("a").Eq(10), 
I("b").Eq(11)) //(("a" = 10) OR ("b" = 11)) +func NewExpressionList(operator ExpressionListType, expressions ...Expression) ExpressionList { + el := expressionList{operator: operator} + exps := make([]Expression, 0, len(el.expressions)) + for _, e := range expressions { + switch t := e.(type) { + case ExpressionList: + if !t.IsEmpty() { + exps = append(exps, e) + } + case Ex: + if len(t) > 0 { + exps = append(exps, e) + } + case ExOr: + if len(t) > 0 { + exps = append(exps, e) + } + default: + exps = append(exps, e) + } + } + el.expressions = exps + return el +} + +func (el expressionList) Clone() Expression { + newExps := make([]Expression, 0, len(el.expressions)) + for _, exp := range el.expressions { + newExps = append(newExps, exp.Clone()) + } + return expressionList{operator: el.operator, expressions: newExps} +} + +func (el expressionList) Expression() Expression { + return el +} + +func (el expressionList) IsEmpty() bool { + return len(el.expressions) == 0 +} + +func (el expressionList) Type() ExpressionListType { + return el.operator +} + +func (el expressionList) Expressions() []Expression { + return el.expressions +} + +func (el expressionList) Append(expressions ...Expression) ExpressionList { + exps := make([]Expression, 0, len(el.expressions)+len(expressions)) + exps = append(exps, el.expressions...) + exps = append(exps, expressions...) + return NewExpressionList(el.operator, exps...) +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/exp_map.go b/vendor/github.com/doug-martin/goqu/v9/exp/exp_map.go new file mode 100644 index 000000000..3fea06b10 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/exp_map.go @@ -0,0 +1,164 @@ +package exp + +import ( + "sort" + "strings" + + "github.com/doug-martin/goqu/v9/internal/errors" +) + +type ( + // A map of expressions to be ANDed together where the keys are string that will be used as Identifiers and values + // will be used in a boolean operation. + // The Ex map can be used in tandem with Op map to create more complex expression such as LIKE, GT, LT... + // See examples. + Ex map[string]interface{} + // A map of expressions to be ORed together where the keys are string that will be used as Identifiers and values + // will be used in a boolean operation. + // The Ex map can be used in tandem with Op map to create more complex expression such as LIKE, GT, LT... + // See examples. + ExOr map[string]interface{} + // Used in tandem with the Ex map to create complex comparisons such as LIKE, GT, LT... 
See examples + Op map[string]interface{} +) + +func (e Ex) Expression() Expression { + return e +} + +func (e Ex) Clone() Expression { + ret := Ex{} + for key, val := range e { + ret[key] = val + } + return ret +} + +func (e Ex) IsEmpty() bool { + return len(e) == 0 +} + +func (e Ex) ToExpressions() (ExpressionList, error) { + return mapToExpressionList(e, AndType) +} + +func (eo ExOr) Expression() Expression { + return eo +} + +func (eo ExOr) Clone() Expression { + ret := ExOr{} + for key, val := range eo { + ret[key] = val + } + return ret +} + +func (eo ExOr) IsEmpty() bool { + return len(eo) == 0 +} + +func (eo ExOr) ToExpressions() (ExpressionList, error) { + return mapToExpressionList(eo, OrType) +} + +func getExMapKeys(ex map[string]interface{}) []string { + keys := make([]string, 0, len(ex)) + for key := range ex { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} + +func mapToExpressionList(ex map[string]interface{}, eType ExpressionListType) (ExpressionList, error) { + keys := getExMapKeys(ex) + ret := make([]Expression, 0, len(keys)) + for _, key := range keys { + lhs := ParseIdentifier(key) + rhs := ex[key] + var exp Expression + if op, ok := rhs.(Op); ok { + ors, err := createOredExpressionFromMap(lhs, op) + if err != nil { + return nil, err + } + exp = NewExpressionList(OrType, ors...) + } else { + exp = lhs.Eq(rhs) + } + ret = append(ret, exp) + } + if eType == OrType { + return NewExpressionList(OrType, ret...), nil + } + return NewExpressionList(AndType, ret...), nil +} + +func createOredExpressionFromMap(lhs IdentifierExpression, op Op) ([]Expression, error) { + opKeys := getExMapKeys(op) + ors := make([]Expression, 0, len(opKeys)) + for _, opKey := range opKeys { + if exp, err := createExpressionFromOp(lhs, opKey, op); err != nil { + return nil, err + } else if exp != nil { + ors = append(ors, exp) + } + } + return ors, nil +} + +// nolint:gocyclo // not complex just long +func createExpressionFromOp(lhs IdentifierExpression, opKey string, op Op) (exp Expression, err error) { + switch strings.ToLower(opKey) { + case EqOp.String(): + exp = lhs.Eq(op[opKey]) + case NeqOp.String(): + exp = lhs.Neq(op[opKey]) + case IsOp.String(): + exp = lhs.Is(op[opKey]) + case IsNotOp.String(): + exp = lhs.IsNot(op[opKey]) + case GtOp.String(): + exp = lhs.Gt(op[opKey]) + case GteOp.String(): + exp = lhs.Gte(op[opKey]) + case LtOp.String(): + exp = lhs.Lt(op[opKey]) + case LteOp.String(): + exp = lhs.Lte(op[opKey]) + case InOp.String(): + exp = lhs.In(op[opKey]) + case NotInOp.String(): + exp = lhs.NotIn(op[opKey]) + case LikeOp.String(): + exp = lhs.Like(op[opKey]) + case NotLikeOp.String(): + exp = lhs.NotLike(op[opKey]) + case ILikeOp.String(): + exp = lhs.ILike(op[opKey]) + case NotILikeOp.String(): + exp = lhs.NotILike(op[opKey]) + case RegexpLikeOp.String(): + exp = lhs.RegexpLike(op[opKey]) + case RegexpNotLikeOp.String(): + exp = lhs.RegexpNotLike(op[opKey]) + case RegexpILikeOp.String(): + exp = lhs.RegexpILike(op[opKey]) + case RegexpNotILikeOp.String(): + exp = lhs.RegexpNotILike(op[opKey]) + case betweenStr: + rangeVal, ok := op[opKey].(RangeVal) + if ok { + exp = lhs.Between(rangeVal) + } + case "notbetween": + rangeVal, ok := op[opKey].(RangeVal) + if ok { + exp = lhs.NotBetween(rangeVal) + } + default: + err = errors.New("unsupported expression type %s", opKey) + } + return exp, err +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/func.go b/vendor/github.com/doug-martin/goqu/v9/exp/func.go new file mode 100644 index 000000000..0ab8cfd94 --- 
/dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/func.go @@ -0,0 +1,89 @@ +package exp + +type ( + sqlFunctionExpression struct { + name string + args []interface{} + } +) + +// Creates a new SQLFunctionExpression with the given name and arguments +func NewSQLFunctionExpression(name string, args ...interface{}) SQLFunctionExpression { + return sqlFunctionExpression{name: name, args: args} +} + +func (sfe sqlFunctionExpression) Clone() Expression { + return sqlFunctionExpression{name: sfe.name, args: sfe.args} +} + +func (sfe sqlFunctionExpression) Expression() Expression { return sfe } + +func (sfe sqlFunctionExpression) Args() []interface{} { return sfe.args } + +func (sfe sqlFunctionExpression) Name() string { return sfe.name } + +func (sfe sqlFunctionExpression) As(val interface{}) AliasedExpression { + return NewAliasExpression(sfe, val) +} + +func (sfe sqlFunctionExpression) Eq(val interface{}) BooleanExpression { return eq(sfe, val) } +func (sfe sqlFunctionExpression) Neq(val interface{}) BooleanExpression { return neq(sfe, val) } + +func (sfe sqlFunctionExpression) Gt(val interface{}) BooleanExpression { return gt(sfe, val) } +func (sfe sqlFunctionExpression) Gte(val interface{}) BooleanExpression { return gte(sfe, val) } +func (sfe sqlFunctionExpression) Lt(val interface{}) BooleanExpression { return lt(sfe, val) } +func (sfe sqlFunctionExpression) Lte(val interface{}) BooleanExpression { return lte(sfe, val) } + +func (sfe sqlFunctionExpression) Between(val RangeVal) RangeExpression { return between(sfe, val) } + +func (sfe sqlFunctionExpression) NotBetween(val RangeVal) RangeExpression { + return notBetween(sfe, val) +} + +func (sfe sqlFunctionExpression) Like(val interface{}) BooleanExpression { return like(sfe, val) } +func (sfe sqlFunctionExpression) NotLike(val interface{}) BooleanExpression { return notLike(sfe, val) } +func (sfe sqlFunctionExpression) ILike(val interface{}) BooleanExpression { return iLike(sfe, val) } + +func (sfe sqlFunctionExpression) NotILike(val interface{}) BooleanExpression { + return notILike(sfe, val) +} + +func (sfe sqlFunctionExpression) RegexpLike(val interface{}) BooleanExpression { + return regexpLike(sfe, val) +} + +func (sfe sqlFunctionExpression) RegexpNotLike(val interface{}) BooleanExpression { + return regexpNotLike(sfe, val) +} + +func (sfe sqlFunctionExpression) RegexpILike(val interface{}) BooleanExpression { + return regexpILike(sfe, val) +} + +func (sfe sqlFunctionExpression) RegexpNotILike(val interface{}) BooleanExpression { + return regexpNotILike(sfe, val) +} + +func (sfe sqlFunctionExpression) In(vals ...interface{}) BooleanExpression { return in(sfe, vals...) } +func (sfe sqlFunctionExpression) NotIn(vals ...interface{}) BooleanExpression { + return notIn(sfe, vals...) 
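// A short sketch of SQLFunctionExpression in use, assuming the goqu v9
// top-level helpers (goqu.Func, goqu.COUNT, goqu.C, goqu.Star); names are
// illustrative:
//
//	sql, _, _ := goqu.From("items").
//		Select(goqu.C("category"), goqu.COUNT(goqu.Star()).As("total")).
//		GroupBy(goqu.C("category")).
//		Having(goqu.COUNT(goqu.Star()).Gt(5)).
//		ToSQL()
//	// sql ≈ SELECT "category", COUNT(*) AS "total" FROM "items" GROUP BY "category" HAVING (COUNT(*) > 5)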
+} +func (sfe sqlFunctionExpression) Is(val interface{}) BooleanExpression { return is(sfe, val) } +func (sfe sqlFunctionExpression) IsNot(val interface{}) BooleanExpression { return isNot(sfe, val) } +func (sfe sqlFunctionExpression) IsNull() BooleanExpression { return is(sfe, nil) } +func (sfe sqlFunctionExpression) IsNotNull() BooleanExpression { return isNot(sfe, nil) } +func (sfe sqlFunctionExpression) IsTrue() BooleanExpression { return is(sfe, true) } +func (sfe sqlFunctionExpression) IsNotTrue() BooleanExpression { return isNot(sfe, true) } +func (sfe sqlFunctionExpression) IsFalse() BooleanExpression { return is(sfe, false) } +func (sfe sqlFunctionExpression) IsNotFalse() BooleanExpression { return isNot(sfe, false) } + +func (sfe sqlFunctionExpression) Over(we WindowExpression) SQLWindowFunctionExpression { + return NewSQLWindowFunctionExpression(sfe, nil, we) +} + +func (sfe sqlFunctionExpression) OverName(windowName IdentifierExpression) SQLWindowFunctionExpression { + return NewSQLWindowFunctionExpression(sfe, windowName, nil) +} + +func (sfe sqlFunctionExpression) Asc() OrderedExpression { return asc(sfe) } +func (sfe sqlFunctionExpression) Desc() OrderedExpression { return desc(sfe) } diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/ident.go b/vendor/github.com/doug-martin/goqu/v9/exp/ident.go new file mode 100644 index 000000000..aebbbd5a9 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/ident.go @@ -0,0 +1,213 @@ +package exp + +import ( + "strings" +) + +type ( + identifier struct { + schema string + table string + col interface{} + } +) + +var ( + tableAndColumnParts = 2 + schemaTableAndColumnIdentifierParts = 3 +) + +func ParseIdentifier(ident string) IdentifierExpression { + parts := strings.Split(ident, ".") + switch len(parts) { + case tableAndColumnParts: + return NewIdentifierExpression("", parts[0], parts[1]) + case schemaTableAndColumnIdentifierParts: + return NewIdentifierExpression(parts[0], parts[1], parts[2]) + } + return NewIdentifierExpression("", "", ident) +} + +func NewIdentifierExpression(schema, table string, col interface{}) IdentifierExpression { + return identifier{}.Schema(schema).Table(table).Col(col) +} + +func (i identifier) clone() identifier { + return identifier{schema: i.schema, table: i.table, col: i.col} +} + +func (i identifier) Clone() Expression { + return i.clone() +} + +func (i identifier) IsQualified() bool { + schema, table, col := i.schema, i.table, i.col + switch c := col.(type) { + case string: + if c != "" { + return len(table) > 0 || len(schema) > 0 + } + default: + if c != nil { + return len(table) > 0 || len(schema) > 0 + } + } + if len(table) > 0 { + return len(schema) > 0 + } + return false +} + +// Sets the table on the current identifier +// I("col").Table("table") -> "table"."col" //postgres +// I("col").Table("table") -> `table`.`col` //mysql +// I("col").Table("table") -> `table`.`col` //sqlite3 +func (i identifier) Table(table string) IdentifierExpression { + i.table = table + return i +} + +func (i identifier) GetTable() string { + return i.table +} + +// Sets the schema on the current identifier +// I("table").Schema("schema") -> "schema"."table" //postgres +// I("table").Schema("schema") -> `schema`.`table` //mysql +// I("table").Schema("schema") -> `schema`.`table` //sqlite3 +func (i identifier) Schema(schema string) IdentifierExpression { + i.schema = schema + return i +} + +func (i identifier) GetSchema() string { + return i.schema +} + +// Sets the column on the current identifier +// 
I("table").Col("col") -> "table"."col" //postgres +// I("table").Col("col") -> `table`.`col` //mysql +// I("table").Col("col") -> `table`.`col` //sqlite3 +func (i identifier) Col(col interface{}) IdentifierExpression { + if col == "*" { + i.col = Star() + } else { + i.col = col + } + return i +} + +func (i identifier) Expression() Expression { return i } + +// Qualifies the expression with a * literal (e.g. "table".*) +func (i identifier) All() IdentifierExpression { return i.Col("*") } + +func (i identifier) IsEmpty() bool { + isEmpty := i.schema == "" && i.table == "" + if isEmpty { + switch t := i.col.(type) { + case nil: + return true + case string: + return t == "" + default: + return false + } + } + return isEmpty +} + +// Gets the column identifier +func (i identifier) GetCol() interface{} { return i.col } + +// Used within updates to set a column value +func (i identifier) Set(val interface{}) UpdateExpression { return set(i, val) } + +// Alias an identifier (e.g "my_col" AS "other_col") +func (i identifier) As(val interface{}) AliasedExpression { + if v, ok := val.(string); ok { + ident := ParseIdentifier(v) + if i.col != nil && i.col != "" { + return NewAliasExpression(i, ident) + } + aliasCol := ident.GetCol() + if i.table != "" { + return NewAliasExpression(i, NewIdentifierExpression("", aliasCol.(string), nil)) + } else if i.schema != "" { + return NewAliasExpression(i, NewIdentifierExpression(aliasCol.(string), "", nil)) + } + } + return NewAliasExpression(i, val) +} + +// Returns a BooleanExpression for equality (e.g "my_col" = 1) +func (i identifier) Eq(val interface{}) BooleanExpression { return eq(i, val) } + +// Returns a BooleanExpression for inequality (e.g "my_col" != 1) +func (i identifier) Neq(val interface{}) BooleanExpression { return neq(i, val) } + +// Returns a BooleanExpression for checking that an identifier is greater than another value (e.g "my_col" > 1) +func (i identifier) Gt(val interface{}) BooleanExpression { return gt(i, val) } + +// Returns a BooleanExpression for checking that an identifier is greater than or equal to another value +// (e.g "my_col" >= 1) +func (i identifier) Gte(val interface{}) BooleanExpression { return gte(i, val) } + +// Returns a BooleanExpression for checking that an identifier is less than another value (e.g "my_col" < 1) +func (i identifier) Lt(val interface{}) BooleanExpression { return lt(i, val) } + +// Returns a BooleanExpression for checking that an identifier is less than or equal to another value +// (e.g "my_col" <= 1) +func (i identifier) Lte(val interface{}) BooleanExpression { return lte(i, val) } + +// Returns a BooleanExpression for bit inversion (e.g ~ "my_col") +func (i identifier) BitwiseInversion() BitwiseExpression { return bitwiseInversion(i) } + +// Returns a BooleanExpression for bit OR (e.g "my_col" | 1) +func (i identifier) BitwiseOr(val interface{}) BitwiseExpression { return bitwiseOr(i, val) } + +// Returns a BooleanExpression for bit AND (e.g "my_col" & 1) +func (i identifier) BitwiseAnd(val interface{}) BitwiseExpression { return bitwiseAnd(i, val) } + +// Returns a BooleanExpression for bit XOR (e.g "my_col" ^ 1) +func (i identifier) BitwiseXor(val interface{}) BitwiseExpression { return bitwiseXor(i, val) } + +// Returns a BooleanExpression for bit LEFT shift (e.g "my_col" << 1) +func (i identifier) BitwiseLeftShift(val interface{}) BitwiseExpression { + return bitwiseLeftShift(i, val) +} + +// Returns a BooleanExpression for bit RIGHT shift (e.g "my_col" >> 1) +func (i identifier) 
BitwiseRightShift(val interface{}) BitwiseExpression { + return bitwiseRightShift(i, val) +} + +// Returns a BooleanExpression for checking that an identifier is in a list of values (e.g "my_col" IN (1, 2, 3)) +func (i identifier) In(vals ...interface{}) BooleanExpression { return in(i, vals...) } +func (i identifier) NotIn(vals ...interface{}) BooleanExpression { return notIn(i, vals...) } +func (i identifier) Like(val interface{}) BooleanExpression { return like(i, val) } +func (i identifier) NotLike(val interface{}) BooleanExpression { return notLike(i, val) } +func (i identifier) ILike(val interface{}) BooleanExpression { return iLike(i, val) } +func (i identifier) NotILike(val interface{}) BooleanExpression { return notILike(i, val) } +func (i identifier) RegexpLike(val interface{}) BooleanExpression { return regexpLike(i, val) } +func (i identifier) RegexpNotLike(val interface{}) BooleanExpression { return regexpNotLike(i, val) } +func (i identifier) RegexpILike(val interface{}) BooleanExpression { return regexpILike(i, val) } +func (i identifier) RegexpNotILike(val interface{}) BooleanExpression { return regexpNotILike(i, val) } +func (i identifier) Is(val interface{}) BooleanExpression { return is(i, val) } +func (i identifier) IsNot(val interface{}) BooleanExpression { return isNot(i, val) } +func (i identifier) IsNull() BooleanExpression { return is(i, nil) } +func (i identifier) IsNotNull() BooleanExpression { return isNot(i, nil) } +func (i identifier) IsTrue() BooleanExpression { return is(i, true) } +func (i identifier) IsNotTrue() BooleanExpression { return isNot(i, true) } +func (i identifier) IsFalse() BooleanExpression { return is(i, false) } +func (i identifier) IsNotFalse() BooleanExpression { return isNot(i, false) } +func (i identifier) Asc() OrderedExpression { return asc(i) } +func (i identifier) Desc() OrderedExpression { return desc(i) } +func (i identifier) Distinct() SQLFunctionExpression { return NewSQLFunctionExpression("DISTINCT", i) } +func (i identifier) Cast(t string) CastExpression { return NewCastExpression(i, t) } + +// Returns a RangeExpression for checking that an identifier is between two values (e.g "my_col" BETWEEN 1 AND 10) +func (i identifier) Between(val RangeVal) RangeExpression { return between(i, val) } + +// Returns a RangeExpression for checking that an identifier is not between two values (e.g "my_col" NOT BETWEEN 1 AND 10) +func (i identifier) NotBetween(val RangeVal) RangeExpression { return notBetween(i, val) } diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/insert.go b/vendor/github.com/doug-martin/goqu/v9/exp/insert.go new file mode 100644 index 000000000..7f58b3cd0 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/insert.go @@ -0,0 +1,163 @@ +package exp + +import ( + "reflect" + "sort" + + "github.com/doug-martin/goqu/v9/internal/errors" + "github.com/doug-martin/goqu/v9/internal/util" +) + +type ( + insert struct { + from AppendableExpression + cols ColumnListExpression + vals [][]interface{} + } +) + +func NewInsertExpression(rows ...interface{}) (insertExpression InsertExpression, err error) { + switch len(rows) { + case 0: + return new(insert), nil + case 1: + val := reflect.ValueOf(rows[0]) + if val.Kind() == reflect.Slice { + vals := make([]interface{}, 0, val.Len()) + for i := 0; i < val.Len(); i++ { + vals = append(vals, val.Index(i).Interface()) + } + return NewInsertExpression(vals...) + } + if ae, ok := rows[0].(AppendableExpression); ok { + return &insert{from: ae}, nil + } + } + return newInsert(rows...)
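// A sketch of identifier parsing and qualification, assuming the goqu v9
// helpers goqu.I, goqu.T and goqu.C; names are illustrative:
//
//	goqu.I("my_schema.users.id")      // "my_schema"."users"."id" (schema.table.col)
//	goqu.T("users").Col("id")         // "users"."id"
//	goqu.C("id").Table("u").As("uid") // "u"."id" AS "uid"
//	goqu.T("users").All()             // "users".*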
+} + +func (i *insert) Expression() Expression { + return i +} + +func (i *insert) Clone() Expression { + return i.clone() +} + +func (i *insert) clone() *insert { + return &insert{from: i.from, cols: i.cols, vals: i.vals} +} + +func (i *insert) IsEmpty() bool { + return i.from == nil && (i.cols == nil || i.cols.IsEmpty()) +} + +func (i *insert) IsInsertFrom() bool { + return i.from != nil +} + +func (i *insert) From() AppendableExpression { + return i.from +} + +func (i *insert) Cols() ColumnListExpression { + return i.cols +} + +func (i *insert) SetCols(cols ColumnListExpression) InsertExpression { + ci := i.clone() + ci.cols = cols + return ci +} + +func (i *insert) Vals() [][]interface{} { + return i.vals +} + +func (i *insert) SetVals(vals [][]interface{}) InsertExpression { + ci := i.clone() + ci.vals = vals + return ci +} + +// parses the rows gathering and sorting unique columns and values for each record +func newInsert(rows ...interface{}) (insertExp InsertExpression, err error) { + var mapKeys util.ValueSlice + rowValue := reflect.Indirect(reflect.ValueOf(rows[0])) + rowType := rowValue.Type() + rowKind := rowValue.Kind() + if rowKind == reflect.Struct { + return createStructSliceInsert(rows...) + } + vals := make([][]interface{}, 0, len(rows)) + var columns ColumnListExpression + for _, row := range rows { + if rowType != reflect.Indirect(reflect.ValueOf(row)).Type() { + return nil, errors.New( + "rows must be all the same type expected %+v got %+v", + rowType, + reflect.Indirect(reflect.ValueOf(row)).Type(), + ) + } + newRowValue := reflect.Indirect(reflect.ValueOf(row)) + switch rowKind { + case reflect.Map: + if columns == nil { + mapKeys = util.ValueSlice(newRowValue.MapKeys()) + sort.Sort(mapKeys) + colKeys := make([]interface{}, 0, len(mapKeys)) + for _, key := range mapKeys { + colKeys = append(colKeys, key.Interface()) + } + columns = NewColumnListExpression(colKeys...) + } + newMapKeys := util.ValueSlice(newRowValue.MapKeys()) + if len(newMapKeys) != len(mapKeys) { + return nil, errors.New("rows with different value length expected %d got %d", len(mapKeys), len(newMapKeys)) + } + if !mapKeys.Equal(newMapKeys) { + return nil, errors.New("rows with different keys expected %s got %s", mapKeys.String(), newMapKeys.String()) + } + rowVals := make([]interface{}, 0, len(mapKeys)) + for _, key := range mapKeys { + rowVals = append(rowVals, newRowValue.MapIndex(key).Interface()) + } + vals = append(vals, rowVals) + default: + return nil, errors.New( + "unsupported insert must be map, goqu.Record, or struct type got: %T", + row, + ) + } + } + return &insert{cols: columns, vals: vals}, nil +} + +func createStructSliceInsert(rows ...interface{}) (insertExp InsertExpression, err error) { + rowValue := reflect.Indirect(reflect.ValueOf(rows[0])) + rowType := rowValue.Type() + recordRows := make([]interface{}, 0, len(rows)) + for _, row := range rows { + if rowType != reflect.Indirect(reflect.ValueOf(row)).Type() { + return nil, errors.New( + "rows must be all the same type expected %+v got %+v", + rowType, + reflect.Indirect(reflect.ValueOf(row)).Type(), + ) + } + newRowValue := reflect.Indirect(reflect.ValueOf(row)) + record, err := getFieldsValuesFromStruct(newRowValue) + if err != nil { + return nil, err + } + recordRows = append(recordRows, record) + } + return newInsert(recordRows...) 
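// A sketch of how these insert expressions are typically built, assuming the
// goqu v9 entry point goqu.Insert; the User type and its `db` tags are
// illustrative (struct fields map to columns via their `db` tags):
//
//	type User struct {
//		FirstName string `db:"first_name"`
//		LastName  string `db:"last_name"`
//	}
//	sql, _, _ := goqu.Insert("user").Rows(
//		User{FirstName: "Greg", LastName: "Farley"},
//		User{FirstName: "Jimmy", LastName: "Stewart"},
//	).ToSQL()
//	// sql ≈ INSERT INTO "user" ("first_name", "last_name") VALUES ('Greg', 'Farley'), ('Jimmy', 'Stewart')
//
// Mixing row types in one call returns an error, matching the
// "rows must be all the same type" check above.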
+} + +func getFieldsValuesFromStruct(value reflect.Value) (row Record, err error) { + if value.IsValid() { + return NewRecordFromStruct(value.Interface(), true, false) + } + return +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/insert_clauses.go b/vendor/github.com/doug-martin/goqu/v9/exp/insert_clauses.go new file mode 100644 index 000000000..ac7a330d3 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/insert_clauses.go @@ -0,0 +1,205 @@ +package exp + +type ( + InsertClauses interface { + CommonTables() []CommonTableExpression + CommonTablesAppend(cte CommonTableExpression) InsertClauses + + HasInto() bool + clone() *insertClauses + + Cols() ColumnListExpression + HasCols() bool + ColsAppend(cols ColumnListExpression) InsertClauses + SetCols(cols ColumnListExpression) InsertClauses + + Into() Expression + SetInto(cl Expression) InsertClauses + + Returning() ColumnListExpression + HasReturning() bool + SetReturning(cl ColumnListExpression) InsertClauses + + From() AppendableExpression + HasFrom() bool + SetFrom(ae AppendableExpression) InsertClauses + + Rows() []interface{} + HasRows() bool + SetRows(rows []interface{}) InsertClauses + + HasAlias() bool + Alias() IdentifierExpression + SetAlias(ie IdentifierExpression) InsertClauses + + Vals() [][]interface{} + HasVals() bool + SetVals(vals [][]interface{}) InsertClauses + ValsAppend(vals [][]interface{}) InsertClauses + + OnConflict() ConflictExpression + SetOnConflict(expression ConflictExpression) InsertClauses + } + insertClauses struct { + commonTables []CommonTableExpression + cols ColumnListExpression + into Expression + returning ColumnListExpression + alias IdentifierExpression + rows []interface{} + values [][]interface{} + from AppendableExpression + conflict ConflictExpression + } +) + +func NewInsertClauses() InsertClauses { + return &insertClauses{} +} + +func (ic *insertClauses) HasInto() bool { + return ic.into != nil +} + +func (ic *insertClauses) clone() *insertClauses { + return &insertClauses{ + commonTables: ic.commonTables, + cols: ic.cols, + into: ic.into, + returning: ic.returning, + alias: ic.alias, + rows: ic.rows, + values: ic.values, + from: ic.from, + conflict: ic.conflict, + } +} + +func (ic *insertClauses) CommonTables() []CommonTableExpression { + return ic.commonTables +} + +func (ic *insertClauses) CommonTablesAppend(cte CommonTableExpression) InsertClauses { + ret := ic.clone() + ret.commonTables = append(ret.commonTables, cte) + return ret +} + +func (ic *insertClauses) Cols() ColumnListExpression { + return ic.cols +} + +func (ic *insertClauses) HasCols() bool { + return ic.cols != nil && !ic.cols.IsEmpty() +} + +func (ic *insertClauses) ColsAppend(cl ColumnListExpression) InsertClauses { + ret := ic.clone() + ret.cols = ret.cols.Append(cl.Columns()...) 
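// A sketch of the ON CONFLICT support wired through SetOnConflict, assuming
// goqu v9's DoNothing/DoUpdate helpers; names are illustrative:
//
//	sql, _, _ := goqu.Insert("user").
//		Rows(goqu.Record{"email": "a@example.com", "name": "Alice"}).
//		OnConflict(goqu.DoUpdate("email", goqu.Record{"name": "Alice"})).
//		ToSQL()
//	// sql ≈ INSERT INTO "user" ("email", "name") VALUES ('a@example.com', 'Alice')
//	//       ON CONFLICT (email) DO UPDATE SET "name"='Alice'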
+ return ret +} + +func (ic *insertClauses) SetCols(cl ColumnListExpression) InsertClauses { + ret := ic.clone() + ret.cols = cl + return ret +} + +func (ic *insertClauses) Into() Expression { + return ic.into +} + +func (ic *insertClauses) SetInto(into Expression) InsertClauses { + ret := ic.clone() + ret.into = into + return ret +} + +func (ic *insertClauses) Returning() ColumnListExpression { + return ic.returning +} + +func (ic *insertClauses) HasReturning() bool { + return ic.returning != nil && !ic.returning.IsEmpty() +} + +func (ic *insertClauses) HasAlias() bool { + return ic.alias != nil +} + +func (ic *insertClauses) Alias() IdentifierExpression { + return ic.alias +} + +func (ic *insertClauses) SetAlias(ie IdentifierExpression) InsertClauses { + ret := ic.clone() + ret.alias = ie + return ret +} + +func (ic *insertClauses) SetReturning(cl ColumnListExpression) InsertClauses { + ret := ic.clone() + ret.returning = cl + return ret +} + +func (ic *insertClauses) From() AppendableExpression { + return ic.from +} + +func (ic *insertClauses) HasFrom() bool { + return ic.from != nil +} + +func (ic *insertClauses) SetFrom(ae AppendableExpression) InsertClauses { + ret := ic.clone() + ret.from = ae + return ret +} + +func (ic *insertClauses) Rows() []interface{} { + return ic.rows +} + +func (ic *insertClauses) HasRows() bool { + return ic.rows != nil && len(ic.rows) > 0 +} + +func (ic *insertClauses) SetRows(rows []interface{}) InsertClauses { + ret := ic.clone() + ret.rows = rows + return ret +} + +func (ic *insertClauses) Vals() [][]interface{} { + return ic.values +} + +func (ic *insertClauses) HasVals() bool { + return ic.values != nil && len(ic.values) > 0 +} + +func (ic *insertClauses) SetVals(vals [][]interface{}) InsertClauses { + ret := ic.clone() + ret.values = vals + return ret +} + +func (ic *insertClauses) ValsAppend(vals [][]interface{}) InsertClauses { + ret := ic.clone() + newVals := make([][]interface{}, 0, len(ic.values)+len(vals)) + newVals = append(newVals, ic.values...) + newVals = append(newVals, vals...) + ret.values = newVals + return ret +} + +func (ic *insertClauses) OnConflict() ConflictExpression { + return ic.conflict +} + +func (ic *insertClauses) SetOnConflict(expression ConflictExpression) InsertClauses { + ret := ic.clone() + ret.conflict = expression + return ret +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/join.go b/vendor/github.com/doug-martin/goqu/v9/exp/join.go new file mode 100644 index 000000000..722858ed7 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/join.go @@ -0,0 +1,139 @@ +package exp + +type ( + joinExpression struct { + isConditioned bool + // The JoinType + joinType JoinType + // The table expressions (e.g. LEFT JOIN "my_table", ON (....)) + table Expression + } + // Container for all joins within a dataset + conditionedJoin struct { + joinExpression + // The condition to join (e.g. 
USING("a", "b"), ON("my_table"."fkey" = "other_table"."id") + condition JoinCondition + } + JoinExpressions []JoinExpression +) + +func NewUnConditionedJoinExpression(joinType JoinType, table Expression) JoinExpression { + return joinExpression{ + joinType: joinType, + table: table, + isConditioned: false, + } +} + +func (je joinExpression) Clone() Expression { + return je +} + +func (je joinExpression) Expression() Expression { + return je +} + +func (je joinExpression) IsConditioned() bool { + return je.isConditioned +} + +func (je joinExpression) JoinType() JoinType { + return je.joinType +} + +func (je joinExpression) Table() Expression { + return je.table +} + +func NewConditionedJoinExpression(joinType JoinType, table Expression, condition JoinCondition) ConditionedJoinExpression { + return conditionedJoin{ + joinExpression: joinExpression{ + joinType: joinType, + table: table, + isConditioned: true, + }, + condition: condition, + } +} + +func (je conditionedJoin) Clone() Expression { + return je +} + +func (je conditionedJoin) Expression() Expression { + return je +} + +func (je conditionedJoin) Condition() JoinCondition { + return je.condition +} + +func (je conditionedJoin) IsConditionEmpty() bool { + return je.condition == nil || je.condition.IsEmpty() +} + +func (jes JoinExpressions) Clone() JoinExpressions { + ret := make(JoinExpressions, 0, len(jes)) + for _, jc := range jes { + ret = append(ret, jc.Clone().(JoinExpression)) + } + return ret +} + +type ( + JoinConditionType int + JoinCondition interface { + Type() JoinConditionType + IsEmpty() bool + } + JoinOnCondition interface { + JoinCondition + On() ExpressionList + } + JoinUsingCondition interface { + JoinCondition + Using() ColumnListExpression + } + joinOnCondition struct { + on ExpressionList + } + + joinUsingCondition struct { + using ColumnListExpression + } +) + +// Creates a new ON clause to be used within a join +// ds.Join(I("my_table"), On(I("my_table.fkey").Eq(I("other_table.id"))) +func NewJoinOnCondition(expressions ...Expression) JoinCondition { + return joinOnCondition{on: NewExpressionList(AndType, expressions...)} +} + +func (joc joinOnCondition) Type() JoinConditionType { + return OnJoinCondType +} + +func (joc joinOnCondition) On() ExpressionList { + return joc.on +} + +func (joc joinOnCondition) IsEmpty() bool { + return len(joc.on.Expressions()) == 0 +} + +// Creates a new USING clause to be used within a join +func NewJoinUsingCondition(expressions ...interface{}) JoinCondition { + return joinUsingCondition{using: NewColumnListExpression(expressions...)} +} + +func (juc joinUsingCondition) Type() JoinConditionType { + return UsingJoinCondType +} + +func (juc joinUsingCondition) Using() ColumnListExpression { + return juc.using +} + +func (juc joinUsingCondition) IsEmpty() bool { + return len(juc.using.Columns()) == 0 +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/lateral.go b/vendor/github.com/doug-martin/goqu/v9/exp/lateral.go new file mode 100644 index 000000000..a3cd01314 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/lateral.go @@ -0,0 +1,24 @@ +package exp + +type ( + lateral struct { + table AppendableExpression + } +) + +// Creates a new SQL lateral expression +// L(From("test")) -> LATERAL (SELECT * FROM "tests") +func NewLateralExpression(table AppendableExpression) LateralExpression { + return lateral{table: table} +} + +func (l lateral) Clone() Expression { + return NewLateralExpression(l.table) +} + +func (l lateral) Table() AppendableExpression { + return 
l.table +} + +func (l lateral) Expression() Expression { return l } +func (l lateral) As(val interface{}) AliasedExpression { return NewAliasExpression(l, val) } diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/literal.go b/vendor/github.com/doug-martin/goqu/v9/exp/literal.go new file mode 100644 index 000000000..da087751b --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/literal.go @@ -0,0 +1,80 @@ +package exp + +type ( + literal struct { + literal string + args []interface{} + } +) + +// Creates a new SQL literal with the provided arguments. +// L("a = 1") -> a = 1 +// You can also you placeholders. All placeholders within a Literal are represented by '?' +// L("a = ?", "b") -> a = 'b' +// Literals can also contain placeholders for other expressions +// L("(? AND ?) OR (?)", I("a").Eq(1), I("b").Eq("b"), I("c").In([]string{"a", "b", "c"})) +func NewLiteralExpression(sql string, args ...interface{}) LiteralExpression { + return literal{literal: sql, args: args} +} + +// Returns a literal for the '*' operator +func Star() LiteralExpression { + return NewLiteralExpression("*") +} + +// Returns a literal for the 'DEFAULT' +func Default() LiteralExpression { + return NewLiteralExpression("DEFAULT") +} + +func (l literal) Clone() Expression { + return NewLiteralExpression(l.literal, l.args...) +} + +func (l literal) Literal() string { + return l.literal +} + +func (l literal) Args() []interface{} { + return l.args +} + +func (l literal) Expression() Expression { return l } +func (l literal) As(val interface{}) AliasedExpression { return NewAliasExpression(l, val) } +func (l literal) Eq(val interface{}) BooleanExpression { return eq(l, val) } +func (l literal) Neq(val interface{}) BooleanExpression { return neq(l, val) } +func (l literal) Gt(val interface{}) BooleanExpression { return gt(l, val) } +func (l literal) Gte(val interface{}) BooleanExpression { return gte(l, val) } +func (l literal) Lt(val interface{}) BooleanExpression { return lt(l, val) } +func (l literal) Lte(val interface{}) BooleanExpression { return lte(l, val) } +func (l literal) Asc() OrderedExpression { return asc(l) } +func (l literal) Desc() OrderedExpression { return desc(l) } +func (l literal) Between(val RangeVal) RangeExpression { return between(l, val) } +func (l literal) NotBetween(val RangeVal) RangeExpression { return notBetween(l, val) } +func (l literal) Like(val interface{}) BooleanExpression { return like(l, val) } +func (l literal) NotLike(val interface{}) BooleanExpression { return notLike(l, val) } +func (l literal) ILike(val interface{}) BooleanExpression { return iLike(l, val) } +func (l literal) NotILike(val interface{}) BooleanExpression { return notILike(l, val) } +func (l literal) RegexpLike(val interface{}) BooleanExpression { return regexpLike(l, val) } +func (l literal) RegexpNotLike(val interface{}) BooleanExpression { return regexpNotLike(l, val) } +func (l literal) RegexpILike(val interface{}) BooleanExpression { return regexpILike(l, val) } +func (l literal) RegexpNotILike(val interface{}) BooleanExpression { return regexpNotILike(l, val) } +func (l literal) In(vals ...interface{}) BooleanExpression { return in(l, vals...) } +func (l literal) NotIn(vals ...interface{}) BooleanExpression { return notIn(l, vals...) 
} +func (l literal) Is(val interface{}) BooleanExpression { return is(l, val) } +func (l literal) IsNot(val interface{}) BooleanExpression { return isNot(l, val) } +func (l literal) IsNull() BooleanExpression { return is(l, nil) } +func (l literal) IsNotNull() BooleanExpression { return isNot(l, nil) } +func (l literal) IsTrue() BooleanExpression { return is(l, true) } +func (l literal) IsNotTrue() BooleanExpression { return isNot(l, true) } +func (l literal) IsFalse() BooleanExpression { return is(l, false) } +func (l literal) IsNotFalse() BooleanExpression { return isNot(l, false) } + +func (l literal) BitwiseInversion() BitwiseExpression { return bitwiseInversion(l) } +func (l literal) BitwiseOr(val interface{}) BitwiseExpression { return bitwiseOr(l, val) } +func (l literal) BitwiseAnd(val interface{}) BitwiseExpression { return bitwiseAnd(l, val) } +func (l literal) BitwiseXor(val interface{}) BitwiseExpression { return bitwiseXor(l, val) } +func (l literal) BitwiseLeftShift(val interface{}) BitwiseExpression { return bitwiseLeftShift(l, val) } +func (l literal) BitwiseRightShift(val interface{}) BitwiseExpression { + return bitwiseRightShift(l, val) +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/lock.go b/vendor/github.com/doug-martin/goqu/v9/exp/lock.go new file mode 100644 index 000000000..9b8bf72e3 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/lock.go @@ -0,0 +1,48 @@ +package exp + +type ( + LockStrength int + WaitOption int + Lock interface { + Strength() LockStrength + WaitOption() WaitOption + Of() []IdentifierExpression + } + lock struct { + strength LockStrength + waitOption WaitOption + of []IdentifierExpression + } +) + +const ( + ForNolock LockStrength = iota + ForUpdate + ForNoKeyUpdate + ForShare + ForKeyShare + + Wait WaitOption = iota + NoWait + SkipLocked +) + +func NewLock(strength LockStrength, option WaitOption, of ...IdentifierExpression) Lock { + return lock{ + strength: strength, + waitOption: option, + of: of, + } +} + +func (l lock) Strength() LockStrength { + return l.strength +} + +func (l lock) WaitOption() WaitOption { + return l.waitOption +} + +func (l lock) Of() []IdentifierExpression { + return l.of +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/order.go b/vendor/github.com/doug-martin/goqu/v9/exp/order.go new file mode 100644 index 000000000..640f1f665 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/order.go @@ -0,0 +1,52 @@ +package exp + +type ( + orderedExpression struct { + sortExpression Expression + direction SortDirection + nullSortType NullSortType + } +) + +// used internally to create a new SORT_ASC OrderedExpression +func asc(exp Expression) OrderedExpression { + return NewOrderedExpression(exp, AscDir, NoNullsSortType) +} + +// used internally to create a new SORT_DESC OrderedExpression +func desc(exp Expression) OrderedExpression { + return NewOrderedExpression(exp, DescSortDir, NoNullsSortType) +} + +// used internally to create a new SORT_ASC OrderedExpression +func NewOrderedExpression(exp Expression, direction SortDirection, sortType NullSortType) OrderedExpression { + return orderedExpression{sortExpression: exp, direction: direction, nullSortType: sortType} +} + +func (oe orderedExpression) Clone() Expression { + return NewOrderedExpression(oe.sortExpression, oe.direction, oe.nullSortType) +} + +func (oe orderedExpression) Expression() Expression { + return oe +} + +func (oe orderedExpression) SortExpression() Expression { + return oe.sortExpression +} + +func (oe 
orderedExpression) IsAsc() bool { + return oe.direction == AscDir +} + +func (oe orderedExpression) NullSortType() NullSortType { + return oe.nullSortType +} + +func (oe orderedExpression) NullsFirst() OrderedExpression { + return NewOrderedExpression(oe.sortExpression, oe.direction, NullsFirstSortType) +} + +func (oe orderedExpression) NullsLast() OrderedExpression { + return NewOrderedExpression(oe.sortExpression, oe.direction, NullsLastSortType) +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/range.go b/vendor/github.com/doug-martin/goqu/v9/exp/range.go new file mode 100644 index 000000000..be167eed1 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/range.go @@ -0,0 +1,61 @@ +package exp + +type ( + ranged struct { + lhs Expression + rhs RangeVal + op RangeOperation + } + rangeVal struct { + start interface{} + end interface{} + } +) + +// used internally to create an BETWEEN comparison RangeExpression +func between(lhs Expression, rhs RangeVal) RangeExpression { + return NewRangeExpression(BetweenOp, lhs, rhs) +} + +// used internally to create an NOT BETWEEN comparison RangeExpression +func notBetween(lhs Expression, rhs RangeVal) RangeExpression { + return NewRangeExpression(NotBetweenOp, lhs, rhs) +} + +func NewRangeExpression(op RangeOperation, lhs Expression, rhs RangeVal) RangeExpression { + return ranged{op: op, lhs: lhs, rhs: rhs} +} + +func (r ranged) Clone() Expression { + return NewRangeExpression(r.op, r.lhs.Clone(), r.rhs) +} + +func (r ranged) Expression() Expression { + return r +} + +func (r ranged) RHS() RangeVal { + return r.rhs +} + +func (r ranged) LHS() Expression { + return r.lhs +} + +func (r ranged) Op() RangeOperation { + return r.op +} + +// Creates a new Range to be used with a Between expression +// exp.C("col").Between(exp.Range(1, 10)) +func NewRangeVal(start, end interface{}) RangeVal { + return rangeVal{start: start, end: end} +} + +func (rv rangeVal) Start() interface{} { + return rv.start +} + +func (rv rangeVal) End() interface{} { + return rv.end +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/record.go b/vendor/github.com/doug-martin/goqu/v9/exp/record.go new file mode 100644 index 000000000..80b99ae03 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/record.go @@ -0,0 +1,59 @@ +package exp + +import ( + "reflect" + "sort" + + "github.com/doug-martin/goqu/v9/internal/util" +) + +// Alternative to writing map[string]interface{}. 
Can be used for Inserts, Updates or Deletes +type Record map[string]interface{} + +func (r Record) Cols() []string { + cols := make([]string, 0, len(r)) + for col := range r { + cols = append(cols, col) + } + sort.Strings(cols) + return cols +} + +func NewRecordFromStruct(i interface{}, forInsert, forUpdate bool) (r Record, err error) { + value := reflect.ValueOf(i) + if value.IsValid() { + cm, err := util.GetColumnMap(value.Interface()) + if err != nil { + return nil, err + } + cols := cm.Cols() + r = make(map[string]interface{}, len(cols)) + for _, col := range cols { + f := cm[col] + if !shouldSkipField(f, forInsert, forUpdate) { + if ok, fieldVal := getFieldValue(value, f); ok { + r[f.ColumnName] = fieldVal + } + } + } + } + return +} + +func shouldSkipField(f util.ColumnData, forInsert, forUpdate bool) bool { + shouldSkipInsert := forInsert && !f.ShouldInsert + shouldSkipUpdate := forUpdate && !f.ShouldUpdate + return shouldSkipInsert || shouldSkipUpdate +} + +func getFieldValue(val reflect.Value, f util.ColumnData) (ok bool, fieldVal interface{}) { + if v, isAvailable := util.SafeGetFieldByIndex(val, f.FieldIndex); !isAvailable { + return false, nil + } else if f.DefaultIfEmpty && util.IsEmptyValue(v) { + return true, Default() + } else if v.IsValid() { + return true, v.Interface() + } else { + return true, reflect.Zero(f.GoType).Interface() + } +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/select_clauses.go b/vendor/github.com/doug-martin/goqu/v9/exp/select_clauses.go new file mode 100644 index 000000000..f802a88b7 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/select_clauses.go @@ -0,0 +1,379 @@ +package exp + +type ( + SelectClauses interface { + HasSources() bool + IsDefaultSelect() bool + clone() *selectClauses + + Select() ColumnListExpression + SelectAppend(cl ColumnListExpression) SelectClauses + SetSelect(cl ColumnListExpression) SelectClauses + + Distinct() ColumnListExpression + SetDistinct(cle ColumnListExpression) SelectClauses + + From() ColumnListExpression + SetFrom(cl ColumnListExpression) SelectClauses + + HasAlias() bool + Alias() IdentifierExpression + SetAlias(ie IdentifierExpression) SelectClauses + + Joins() JoinExpressions + JoinsAppend(jc JoinExpression) SelectClauses + + Where() ExpressionList + ClearWhere() SelectClauses + WhereAppend(expressions ...Expression) SelectClauses + + Having() ExpressionList + ClearHaving() SelectClauses + HavingAppend(expressions ...Expression) SelectClauses + + Order() ColumnListExpression + HasOrder() bool + ClearOrder() SelectClauses + SetOrder(oes ...OrderedExpression) SelectClauses + OrderAppend(...OrderedExpression) SelectClauses + OrderPrepend(...OrderedExpression) SelectClauses + + GroupBy() ColumnListExpression + SetGroupBy(cl ColumnListExpression) SelectClauses + GroupByAppend(cl ColumnListExpression) SelectClauses + + Limit() interface{} + HasLimit() bool + ClearLimit() SelectClauses + SetLimit(limit interface{}) SelectClauses + + Offset() uint + ClearOffset() SelectClauses + SetOffset(offset uint) SelectClauses + + Compounds() []CompoundExpression + CompoundsAppend(ce CompoundExpression) SelectClauses + + Lock() Lock + SetLock(l Lock) SelectClauses + + CommonTables() []CommonTableExpression + CommonTablesAppend(cte CommonTableExpression) SelectClauses + + Windows() []WindowExpression + SetWindows(ws []WindowExpression) SelectClauses + WindowsAppend(ws ...WindowExpression) SelectClauses + ClearWindows() SelectClauses + } + selectClauses struct { + commonTables []CommonTableExpression + 
selectColumns ColumnListExpression + distinct ColumnListExpression + from ColumnListExpression + joins JoinExpressions + where ExpressionList + alias IdentifierExpression + groupBy ColumnListExpression + having ExpressionList + order ColumnListExpression + limit interface{} + offset uint + compounds []CompoundExpression + lock Lock + windows []WindowExpression + } +) + +func NewSelectClauses() SelectClauses { + return &selectClauses{ + selectColumns: NewColumnListExpression(Star()), + } +} + +func (c *selectClauses) HasSources() bool { + return c.from != nil && len(c.from.Columns()) > 0 +} + +func (c *selectClauses) IsDefaultSelect() bool { + ret := false + if c.selectColumns != nil { + selects := c.selectColumns.Columns() + if len(selects) == 1 { + if l, ok := selects[0].(LiteralExpression); ok && l.Literal() == "*" { + ret = true + } + } + } + return ret +} + +func (c *selectClauses) clone() *selectClauses { + return &selectClauses{ + commonTables: c.commonTables, + selectColumns: c.selectColumns, + distinct: c.distinct, + from: c.from, + joins: c.joins[0:len(c.joins):len(c.joins)], + where: c.where, + alias: c.alias, + groupBy: c.groupBy, + having: c.having, + order: c.order, + limit: c.limit, + offset: c.offset, + compounds: c.compounds, + lock: c.lock, + windows: c.windows, + } +} + +func (c *selectClauses) CommonTables() []CommonTableExpression { + return c.commonTables +} + +func (c *selectClauses) CommonTablesAppend(cte CommonTableExpression) SelectClauses { + ret := c.clone() + ret.commonTables = append(ret.commonTables, cte) + return ret +} + +func (c *selectClauses) Select() ColumnListExpression { + return c.selectColumns +} + +func (c *selectClauses) SelectAppend(cl ColumnListExpression) SelectClauses { + ret := c.clone() + ret.selectColumns = ret.selectColumns.Append(cl.Columns()...) + return ret +} + +func (c *selectClauses) SetSelect(cl ColumnListExpression) SelectClauses { + ret := c.clone() + ret.selectColumns = cl + return ret +} + +func (c *selectClauses) Distinct() ColumnListExpression { + return c.distinct +} + +func (c *selectClauses) SetDistinct(cle ColumnListExpression) SelectClauses { + ret := c.clone() + ret.distinct = cle + return ret +} + +func (c *selectClauses) From() ColumnListExpression { + return c.from +} + +func (c *selectClauses) SetFrom(cl ColumnListExpression) SelectClauses { + ret := c.clone() + ret.from = cl + return ret +} + +func (c *selectClauses) HasAlias() bool { + return c.alias != nil +} + +func (c *selectClauses) Alias() IdentifierExpression { + return c.alias +} + +func (c *selectClauses) SetAlias(ie IdentifierExpression) SelectClauses { + ret := c.clone() + ret.alias = ie + return ret +} + +func (c *selectClauses) Joins() JoinExpressions { + return c.joins +} + +func (c *selectClauses) JoinsAppend(jc JoinExpression) SelectClauses { + ret := c.clone() + ret.joins = append(ret.joins, jc) + return ret +} + +func (c *selectClauses) Where() ExpressionList { + return c.where +} + +func (c *selectClauses) ClearWhere() SelectClauses { + ret := c.clone() + ret.where = nil + return ret +} + +func (c *selectClauses) WhereAppend(expressions ...Expression) SelectClauses { + if len(expressions) == 0 { + return c + } + ret := c.clone() + if ret.where == nil { + ret.where = NewExpressionList(AndType, expressions...) + } else { + ret.where = ret.where.Append(expressions...) 
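// Note the clone-then-mutate pattern used by every setter above: each call
// returns a copy, so datasets built on these clauses behave like values. A
// sketch, assuming the goqu v9 SelectDataset:
//
//	base := goqu.From("test").Where(goqu.C("a").Gt(10))
//	narrowed := base.Where(goqu.C("b").Lt(100)) // appended, ANDed with the first
//	// base is unchanged; narrowed ≈ SELECT * FROM "test" WHERE (("a" > 10) AND ("b" < 100))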
+ } + return ret +} + +func (c *selectClauses) Having() ExpressionList { + return c.having +} + +func (c *selectClauses) ClearHaving() SelectClauses { + ret := c.clone() + ret.having = nil + return ret +} + +func (c *selectClauses) HavingAppend(expressions ...Expression) SelectClauses { + if len(expressions) == 0 { + return c + } + ret := c.clone() + if ret.having == nil { + ret.having = NewExpressionList(AndType, expressions...) + } else { + ret.having = ret.having.Append(expressions...) + } + return ret +} + +func (c *selectClauses) Lock() Lock { + return c.lock +} + +func (c *selectClauses) SetLock(l Lock) SelectClauses { + ret := c.clone() + ret.lock = l + return ret +} + +func (c *selectClauses) Order() ColumnListExpression { + return c.order +} + +func (c *selectClauses) HasOrder() bool { + return c.order != nil +} + +func (c *selectClauses) ClearOrder() SelectClauses { + ret := c.clone() + ret.order = nil + return ret +} + +func (c *selectClauses) SetOrder(oes ...OrderedExpression) SelectClauses { + ret := c.clone() + ret.order = NewOrderedColumnList(oes...) + return ret +} + +func (c *selectClauses) OrderAppend(oes ...OrderedExpression) SelectClauses { + if c.order == nil { + return c.SetOrder(oes...) + } + ret := c.clone() + ret.order = ret.order.Append(NewOrderedColumnList(oes...).Columns()...) + return ret +} + +func (c *selectClauses) OrderPrepend(oes ...OrderedExpression) SelectClauses { + if c.order == nil { + return c.SetOrder(oes...) + } + ret := c.clone() + ret.order = NewOrderedColumnList(oes...).Append(ret.order.Columns()...) + return ret +} + +func (c *selectClauses) GroupBy() ColumnListExpression { + return c.groupBy +} + +func (c *selectClauses) GroupByAppend(cl ColumnListExpression) SelectClauses { + if c.groupBy == nil { + return c.SetGroupBy(cl) + } + ret := c.clone() + ret.groupBy = ret.groupBy.Append(cl.Columns()...) + return ret +} + +func (c *selectClauses) SetGroupBy(cl ColumnListExpression) SelectClauses { + ret := c.clone() + ret.groupBy = cl + return ret +} + +func (c *selectClauses) Limit() interface{} { + return c.limit +} + +func (c *selectClauses) HasLimit() bool { + return c.limit != nil +} + +func (c *selectClauses) ClearLimit() SelectClauses { + ret := c.clone() + ret.limit = nil + return ret +} + +func (c *selectClauses) SetLimit(limit interface{}) SelectClauses { + ret := c.clone() + ret.limit = limit + return ret +} + +func (c *selectClauses) Offset() uint { + return c.offset +} + +func (c *selectClauses) ClearOffset() SelectClauses { + ret := c.clone() + ret.offset = 0 + return ret +} + +func (c *selectClauses) SetOffset(offset uint) SelectClauses { + ret := c.clone() + ret.offset = offset + return ret +} + +func (c *selectClauses) Compounds() []CompoundExpression { + return c.compounds +} + +func (c *selectClauses) CompoundsAppend(ce CompoundExpression) SelectClauses { + ret := c.clone() + ret.compounds = append(ret.compounds, ce) + return ret +} + +func (c *selectClauses) Windows() []WindowExpression { + return c.windows +} + +func (c *selectClauses) SetWindows(ws []WindowExpression) SelectClauses { + ret := c.clone() + ret.windows = ws + return ret +} + +func (c *selectClauses) WindowsAppend(ws ...WindowExpression) SelectClauses { + ret := c.clone() + ret.windows = append(ret.windows, ws...) 
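// A sketch of the ordering and paging clauses above, assuming the goqu v9
// SelectDataset; names are illustrative:
//
//	sql, _, _ := goqu.From("test").
//		Order(goqu.C("created_at").Desc().NullsLast()).
//		Limit(10).
//		Offset(20).
//		ToSQL()
//	// sql ≈ SELECT * FROM "test" ORDER BY "created_at" DESC NULLS LAST LIMIT 10 OFFSET 20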
+ return ret +} + +func (c *selectClauses) ClearWindows() SelectClauses { + ret := c.clone() + ret.windows = nil + return ret +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/truncate.go b/vendor/github.com/doug-martin/goqu/v9/exp/truncate.go new file mode 100644 index 000000000..1486a0036 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/truncate.go @@ -0,0 +1,11 @@ +package exp + +// Options to use when generating a TRUNCATE statement +type TruncateOptions struct { + // Set to true to add CASCADE to the TRUNCATE statement + Cascade bool + // Set to true to add RESTRICT to the TRUNCATE statement + Restrict bool + // Set to the desired IDENTITY option (e.g. RESTART, CONTINUE) to add to the TRUNCATE statement + Identity string +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/truncate_clauses.go b/vendor/github.com/doug-martin/goqu/v9/exp/truncate_clauses.go new file mode 100644 index 000000000..2b3bd8b59 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/truncate_clauses.go @@ -0,0 +1,50 @@ +package exp + +type ( + TruncateClauses interface { + HasTable() bool + clone() *truncateClauses + + Table() ColumnListExpression + SetTable(tables ColumnListExpression) TruncateClauses + + Options() TruncateOptions + SetOptions(opts TruncateOptions) TruncateClauses + } + truncateClauses struct { + tables ColumnListExpression + options TruncateOptions + } +) + +func NewTruncateClauses() TruncateClauses { + return &truncateClauses{} +} + +func (tc *truncateClauses) HasTable() bool { + return tc.tables != nil +} + +func (tc *truncateClauses) clone() *truncateClauses { + return &truncateClauses{ + tables: tc.tables, + options: tc.options, + } +} + +func (tc *truncateClauses) Table() ColumnListExpression { + return tc.tables +} +func (tc *truncateClauses) SetTable(tables ColumnListExpression) TruncateClauses { + ret := tc.clone() + ret.tables = tables + return ret +} + +func (tc *truncateClauses) Options() TruncateOptions { + return tc.options +} +func (tc *truncateClauses) SetOptions(opts TruncateOptions) TruncateClauses { + ret := tc.clone() + ret.options = opts + return ret +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/update.go b/vendor/github.com/doug-martin/goqu/v9/exp/update.go new file mode 100644 index 000000000..da9b19877 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/update.go @@ -0,0 +1,69 @@ +package exp + +import ( + "reflect" + "sort" + + "github.com/doug-martin/goqu/v9/internal/errors" + "github.com/doug-martin/goqu/v9/internal/util" +) + +type ( + update struct { + col IdentifierExpression + val interface{} + } +) + +func set(col IdentifierExpression, val interface{}) UpdateExpression { + return update{col: col, val: val} +} + +func NewUpdateExpressions(update interface{}) (updates []UpdateExpression, err error) { + if u, ok := update.(UpdateExpression); ok { + updates = append(updates, u) + return updates, nil + } + updateValue := reflect.Indirect(reflect.ValueOf(update)) + switch updateValue.Kind() { + case reflect.Map: + keys := util.ValueSlice(updateValue.MapKeys()) + sort.Sort(keys) + for _, key := range keys { + updates = append(updates, ParseIdentifier(key.String()).Set(updateValue.MapIndex(key).Interface())) + } + case reflect.Struct: + return getUpdateExpressionsStruct(updateValue) + default: + return nil, errors.New("unsupported update interface type %+v", updateValue.Type()) + } + return updates, nil +} + +func getUpdateExpressionsStruct(value reflect.Value) (updates []UpdateExpression, err error) { + r, err := 
NewRecordFromStruct(value.Interface(), false, true) + if err != nil { + return updates, err + } + cols := r.Cols() + for _, col := range cols { + updates = append(updates, ParseIdentifier(col).Set(r[col])) + } + return updates, nil +} + +func (u update) Expression() Expression { + return u +} + +func (u update) Clone() Expression { + return update{col: u.col.Clone().(IdentifierExpression), val: u.val} +} + +func (u update) Col() IdentifierExpression { + return u.col +} + +func (u update) Val() interface{} { + return u.val +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/update_clauses.go b/vendor/github.com/doug-martin/goqu/v9/exp/update_clauses.go new file mode 100644 index 000000000..562ca8dc1 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/update_clauses.go @@ -0,0 +1,216 @@ +package exp + +type ( + UpdateClauses interface { + HasTable() bool + clone() *updateClauses + + CommonTables() []CommonTableExpression + CommonTablesAppend(cte CommonTableExpression) UpdateClauses + + Table() Expression + SetTable(table Expression) UpdateClauses + + SetValues() interface{} + HasSetValues() bool + SetSetValues(values interface{}) UpdateClauses + + From() ColumnListExpression + HasFrom() bool + SetFrom(tables ColumnListExpression) UpdateClauses + + Where() ExpressionList + ClearWhere() UpdateClauses + WhereAppend(expressions ...Expression) UpdateClauses + + Order() ColumnListExpression + HasOrder() bool + ClearOrder() UpdateClauses + SetOrder(oes ...OrderedExpression) UpdateClauses + OrderAppend(...OrderedExpression) UpdateClauses + OrderPrepend(...OrderedExpression) UpdateClauses + + Limit() interface{} + HasLimit() bool + ClearLimit() UpdateClauses + SetLimit(limit interface{}) UpdateClauses + + Returning() ColumnListExpression + HasReturning() bool + SetReturning(cl ColumnListExpression) UpdateClauses + } + updateClauses struct { + commonTables []CommonTableExpression + table Expression + setValues interface{} + from ColumnListExpression + where ExpressionList + order ColumnListExpression + limit interface{} + returning ColumnListExpression + } +) + +func NewUpdateClauses() UpdateClauses { + return &updateClauses{} +} + +func (uc *updateClauses) HasTable() bool { + return uc.table != nil +} + +func (uc *updateClauses) clone() *updateClauses { + return &updateClauses{ + commonTables: uc.commonTables, + table: uc.table, + setValues: uc.setValues, + from: uc.from, + where: uc.where, + order: uc.order, + limit: uc.limit, + returning: uc.returning, + } +} + +func (uc *updateClauses) CommonTables() []CommonTableExpression { + return uc.commonTables +} + +func (uc *updateClauses) CommonTablesAppend(cte CommonTableExpression) UpdateClauses { + ret := uc.clone() + ret.commonTables = append(ret.commonTables, cte) + return ret +} + +func (uc *updateClauses) Table() Expression { + return uc.table +} + +func (uc *updateClauses) SetTable(table Expression) UpdateClauses { + ret := uc.clone() + ret.table = table + return ret +} + +func (uc *updateClauses) SetValues() interface{} { + return uc.setValues +} + +func (uc *updateClauses) HasSetValues() bool { + return uc.setValues != nil +} + +func (uc *updateClauses) SetSetValues(values interface{}) UpdateClauses { + ret := uc.clone() + ret.setValues = values + return ret +} + +func (uc *updateClauses) From() ColumnListExpression { + return uc.from +} + +func (uc *updateClauses) HasFrom() bool { + return uc.from != nil && !uc.from.IsEmpty() +} + +func (uc *updateClauses) SetFrom(from ColumnListExpression) UpdateClauses { + ret := uc.clone() + 
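// A sketch of how these update expressions are produced, assuming the goqu v9
// entry point goqu.Update; a Record maps column names to new values, and a
// struct with `db` tags works the same way via NewRecordFromStruct:
//
//	sql, _, _ := goqu.Update("items").
//		Set(goqu.Record{"name": "new name", "price": 10}).
//		Where(goqu.C("id").Eq(1)).
//		ToSQL()
//	// sql ≈ UPDATE "items" SET "name"='new name',"price"=10 WHERE ("id" = 1)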
ret.from = from + return ret +} + +func (uc *updateClauses) Where() ExpressionList { + return uc.where +} + +func (uc *updateClauses) ClearWhere() UpdateClauses { + ret := uc.clone() + ret.where = nil + return ret +} + +func (uc *updateClauses) WhereAppend(expressions ...Expression) UpdateClauses { + if len(expressions) == 0 { + return uc + } + ret := uc.clone() + if ret.where == nil { + ret.where = NewExpressionList(AndType, expressions...) + } else { + ret.where = ret.where.Append(expressions...) + } + return ret +} + +func (uc *updateClauses) Order() ColumnListExpression { + return uc.order +} + +func (uc *updateClauses) HasOrder() bool { + return uc.order != nil +} + +func (uc *updateClauses) ClearOrder() UpdateClauses { + ret := uc.clone() + ret.order = nil + return ret +} + +func (uc *updateClauses) SetOrder(oes ...OrderedExpression) UpdateClauses { + ret := uc.clone() + ret.order = NewOrderedColumnList(oes...) + return ret +} + +func (uc *updateClauses) OrderAppend(oes ...OrderedExpression) UpdateClauses { + if uc.order == nil { + return uc.SetOrder(oes...) + } + ret := uc.clone() + ret.order = ret.order.Append(NewOrderedColumnList(oes...).Columns()...) + return ret +} + +func (uc *updateClauses) OrderPrepend(oes ...OrderedExpression) UpdateClauses { + if uc.order == nil { + return uc.SetOrder(oes...) + } + ret := uc.clone() + ret.order = NewOrderedColumnList(oes...).Append(ret.order.Columns()...) + return ret +} + +func (uc *updateClauses) Limit() interface{} { + return uc.limit +} + +func (uc *updateClauses) HasLimit() bool { + return uc.limit != nil +} + +func (uc *updateClauses) ClearLimit() UpdateClauses { + ret := uc.clone() + ret.limit = nil + return ret +} + +func (uc *updateClauses) SetLimit(limit interface{}) UpdateClauses { + ret := uc.clone() + ret.limit = limit + return ret +} + +func (uc *updateClauses) Returning() ColumnListExpression { + return uc.returning +} + +func (uc *updateClauses) HasReturning() bool { + return uc.returning != nil && !uc.returning.IsEmpty() +} + +func (uc *updateClauses) SetReturning(cl ColumnListExpression) UpdateClauses { + ret := uc.clone() + ret.returning = cl + return ret +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/window.go b/vendor/github.com/doug-martin/goqu/v9/exp/window.go new file mode 100644 index 000000000..a33178a03 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/window.go @@ -0,0 +1,90 @@ +package exp + +type sqlWindowExpression struct { + name IdentifierExpression + parent IdentifierExpression + partitionCols ColumnListExpression + orderCols ColumnListExpression +} + +func NewWindowExpression(window, parent IdentifierExpression, partitionCols, orderCols ColumnListExpression) WindowExpression { + if partitionCols == nil { + partitionCols = NewColumnListExpression() + } + if orderCols == nil { + orderCols = NewColumnListExpression() + } + return sqlWindowExpression{ + name: window, + parent: parent, + partitionCols: partitionCols, + orderCols: orderCols, + } +} + +func (we sqlWindowExpression) clone() sqlWindowExpression { + return sqlWindowExpression{ + name: we.name, + parent: we.parent, + partitionCols: we.partitionCols.Clone().(ColumnListExpression), + orderCols: we.orderCols.Clone().(ColumnListExpression), + } +} + +func (we sqlWindowExpression) Clone() Expression { + return we.clone() +} + +func (we sqlWindowExpression) Expression() Expression { + return we +} + +func (we sqlWindowExpression) Name() IdentifierExpression { + return we.name +} + +func (we sqlWindowExpression) HasName() bool { + 
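// A sketch of window expressions in use, assuming goqu v9's window helpers
// goqu.W and goqu.ROW_NUMBER (both part of the v9 window-function support);
// names are illustrative:
//
//	sql, _, _ := goqu.From("payments").Select(
//		goqu.ROW_NUMBER().Over(goqu.W().PartitionBy("account_id").OrderBy(goqu.C("created_at").Desc())),
//	).ToSQL()
//	// sql ≈ SELECT ROW_NUMBER() OVER (PARTITION BY "account_id" ORDER BY "created_at" DESC) FROM "payments"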
return we.name != nil +} + +func (we sqlWindowExpression) Parent() IdentifierExpression { + return we.parent +} + +func (we sqlWindowExpression) HasParent() bool { + return we.parent != nil +} + +func (we sqlWindowExpression) PartitionCols() ColumnListExpression { + return we.partitionCols +} + +func (we sqlWindowExpression) HasPartitionBy() bool { + return we.partitionCols != nil && !we.partitionCols.IsEmpty() +} + +func (we sqlWindowExpression) OrderCols() ColumnListExpression { + return we.orderCols +} + +func (we sqlWindowExpression) HasOrder() bool { + return we.orderCols != nil && !we.orderCols.IsEmpty() +} + +func (we sqlWindowExpression) PartitionBy(cols ...interface{}) WindowExpression { + ret := we.clone() + ret.partitionCols = NewColumnListExpression(cols...) + return ret +} + +func (we sqlWindowExpression) OrderBy(cols ...interface{}) WindowExpression { + ret := we.clone() + ret.orderCols = NewColumnListExpression(cols...) + return ret +} + +func (we sqlWindowExpression) Inherit(parent string) WindowExpression { + ret := we.clone() + ret.parent = ParseIdentifier(parent) + return ret +} diff --git a/vendor/github.com/doug-martin/goqu/v9/exp/window_func.go b/vendor/github.com/doug-martin/goqu/v9/exp/window_func.go new file mode 100644 index 000000000..d5ae5531a --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/exp/window_func.go @@ -0,0 +1,124 @@ +package exp + +type sqlWindowFunctionExpression struct { + fn SQLFunctionExpression + windowName IdentifierExpression + window WindowExpression +} + +func NewSQLWindowFunctionExpression( + fn SQLFunctionExpression, + windowName IdentifierExpression, + window WindowExpression) SQLWindowFunctionExpression { + return sqlWindowFunctionExpression{ + fn: fn, + windowName: windowName, + window: window, + } +} + +func (swfe sqlWindowFunctionExpression) clone() sqlWindowFunctionExpression { + return sqlWindowFunctionExpression{ + fn: swfe.fn.Clone().(SQLFunctionExpression), + windowName: swfe.windowName, + window: swfe.window, + } +} + +func (swfe sqlWindowFunctionExpression) Clone() Expression { + return swfe.clone() +} + +func (swfe sqlWindowFunctionExpression) Expression() Expression { + return swfe +} + +func (swfe sqlWindowFunctionExpression) As(val interface{}) AliasedExpression { + return NewAliasExpression(swfe, val) +} +func (swfe sqlWindowFunctionExpression) Eq(val interface{}) BooleanExpression { return eq(swfe, val) } +func (swfe sqlWindowFunctionExpression) Neq(val interface{}) BooleanExpression { return neq(swfe, val) } +func (swfe sqlWindowFunctionExpression) Gt(val interface{}) BooleanExpression { return gt(swfe, val) } +func (swfe sqlWindowFunctionExpression) Gte(val interface{}) BooleanExpression { return gte(swfe, val) } +func (swfe sqlWindowFunctionExpression) Lt(val interface{}) BooleanExpression { return lt(swfe, val) } +func (swfe sqlWindowFunctionExpression) Lte(val interface{}) BooleanExpression { return lte(swfe, val) } +func (swfe sqlWindowFunctionExpression) Between(val RangeVal) RangeExpression { + return between(swfe, val) +} + +func (swfe sqlWindowFunctionExpression) NotBetween(val RangeVal) RangeExpression { + return notBetween(swfe, val) +} + +func (swfe sqlWindowFunctionExpression) Like(val interface{}) BooleanExpression { + return like(swfe, val) +} + +func (swfe sqlWindowFunctionExpression) NotLike(val interface{}) BooleanExpression { + return notLike(swfe, val) +} + +func (swfe sqlWindowFunctionExpression) ILike(val interface{}) BooleanExpression { + return iLike(swfe, val) +} + +func (swfe 
sqlWindowFunctionExpression) NotILike(val interface{}) BooleanExpression { + return notILike(swfe, val) +} + +func (swfe sqlWindowFunctionExpression) RegexpLike(val interface{}) BooleanExpression { + return regexpLike(swfe, val) +} + +func (swfe sqlWindowFunctionExpression) RegexpNotLike(val interface{}) BooleanExpression { + return regexpNotLike(swfe, val) +} + +func (swfe sqlWindowFunctionExpression) RegexpILike(val interface{}) BooleanExpression { + return regexpILike(swfe, val) +} + +func (swfe sqlWindowFunctionExpression) RegexpNotILike(val interface{}) BooleanExpression { + return regexpNotILike(swfe, val) +} + +func (swfe sqlWindowFunctionExpression) In(vals ...interface{}) BooleanExpression { + return in(swfe, vals...) +} + +func (swfe sqlWindowFunctionExpression) NotIn(vals ...interface{}) BooleanExpression { + return notIn(swfe, vals...) +} +func (swfe sqlWindowFunctionExpression) Is(val interface{}) BooleanExpression { return is(swfe, val) } +func (swfe sqlWindowFunctionExpression) IsNot(val interface{}) BooleanExpression { + return isNot(swfe, val) +} +func (swfe sqlWindowFunctionExpression) IsNull() BooleanExpression { return is(swfe, nil) } +func (swfe sqlWindowFunctionExpression) IsNotNull() BooleanExpression { return isNot(swfe, nil) } +func (swfe sqlWindowFunctionExpression) IsTrue() BooleanExpression { return is(swfe, true) } +func (swfe sqlWindowFunctionExpression) IsNotTrue() BooleanExpression { return isNot(swfe, true) } +func (swfe sqlWindowFunctionExpression) IsFalse() BooleanExpression { return is(swfe, false) } +func (swfe sqlWindowFunctionExpression) IsNotFalse() BooleanExpression { return isNot(swfe, false) } + +func (swfe sqlWindowFunctionExpression) Asc() OrderedExpression { return asc(swfe) } +func (swfe sqlWindowFunctionExpression) Desc() OrderedExpression { return desc(swfe) } + +func (swfe sqlWindowFunctionExpression) Func() SQLFunctionExpression { + return swfe.fn +} + +func (swfe sqlWindowFunctionExpression) Window() WindowExpression { + return swfe.window +} + +func (swfe sqlWindowFunctionExpression) WindowName() IdentifierExpression { + return swfe.windowName +} + +func (swfe sqlWindowFunctionExpression) HasWindow() bool { + return swfe.window != nil +} + +func (swfe sqlWindowFunctionExpression) HasWindowName() bool { + return swfe.windowName != nil +} diff --git a/vendor/github.com/doug-martin/goqu/v9/expressions.go b/vendor/github.com/doug-martin/goqu/v9/expressions.go new file mode 100644 index 000000000..5e5ef86f8 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/expressions.go @@ -0,0 +1,303 @@ +package goqu + +import ( + "github.com/doug-martin/goqu/v9/exp" +) + +type ( + Expression = exp.Expression + Ex = exp.Ex + ExOr = exp.ExOr + Op = exp.Op + Record = exp.Record + Vals = exp.Vals + // Options to use when generating a TRUNCATE statement + TruncateOptions = exp.TruncateOptions +) + +// emptyWindow is an empty WINDOW clause without name +var emptyWindow = exp.NewWindowExpression(nil, nil, nil, nil) + +const ( + Wait = exp.Wait + NoWait = exp.NoWait + SkipLocked = exp.SkipLocked +) + +// Creates a new Casted expression +// Cast(I("a"), "NUMERIC") -> CAST("a" AS NUMERIC) +func Cast(e exp.Expression, t string) exp.CastExpression { + return exp.NewCastExpression(e, t) +} + +// Creates a conflict struct to be passed to InsertConflict to ignore constraint errors +// InsertConflict(DoNothing(),...) -> INSERT INTO ... 
ON CONFLICT DO NOTHING +func DoNothing() exp.ConflictExpression { + return exp.NewDoNothingConflictExpression() +} + +// Creates a ConflictUpdate struct to be passed to InsertConflict +// Represents a ON CONFLICT DO UPDATE portion of an INSERT statement (ON DUPLICATE KEY UPDATE for mysql) +// +// InsertConflict(DoUpdate("target_column", update),...) -> +// INSERT INTO ... ON CONFLICT DO UPDATE SET a=b +// InsertConflict(DoUpdate("target_column", update).Where(Ex{"a": 1},...) -> +// INSERT INTO ... ON CONFLICT DO UPDATE SET a=b WHERE a=1 +func DoUpdate(target string, update interface{}) exp.ConflictUpdateExpression { + return exp.NewDoUpdateConflictExpression(target, update) +} + +// A list of expressions that should be ORed together +// Or(I("a").Eq(10), I("b").Eq(11)) //(("a" = 10) OR ("b" = 11)) +func Or(expressions ...exp.Expression) exp.ExpressionList { + return exp.NewExpressionList(exp.OrType, expressions...) +} + +// A list of expressions that should be ANDed together +// And(I("a").Eq(10), I("b").Eq(11)) //(("a" = 10) AND ("b" = 11)) +func And(expressions ...exp.Expression) exp.ExpressionList { + return exp.NewExpressionList(exp.AndType, expressions...) +} + +// Creates a new SQLFunctionExpression with the given name and arguments +func Func(name string, args ...interface{}) exp.SQLFunctionExpression { + return exp.NewSQLFunctionExpression(name, args...) +} + +// used internally to normalize the column name if passed in as a string it should be turned into an identifier +func newIdentifierFunc(name string, col interface{}) exp.SQLFunctionExpression { + if s, ok := col.(string); ok { + col = I(s) + } + return Func(name, col) +} + +// Creates a new DISTINCT sql function +// DISTINCT("a") -> DISTINCT("a") +// DISTINCT(I("a")) -> DISTINCT("a") +func DISTINCT(col interface{}) exp.SQLFunctionExpression { return newIdentifierFunc("DISTINCT", col) } + +// Creates a new COUNT sql function +// COUNT("a") -> COUNT("a") +// COUNT("*") -> COUNT("*") +// COUNT(I("a")) -> COUNT("a") +func COUNT(col interface{}) exp.SQLFunctionExpression { return newIdentifierFunc("COUNT", col) } + +// Creates a new MIN sql function +// MIN("a") -> MIN("a") +// MIN(I("a")) -> MIN("a") +func MIN(col interface{}) exp.SQLFunctionExpression { return newIdentifierFunc("MIN", col) } + +// Creates a new MAX sql function +// MAX("a") -> MAX("a") +// MAX(I("a")) -> MAX("a") +func MAX(col interface{}) exp.SQLFunctionExpression { return newIdentifierFunc("MAX", col) } + +// Creates a new AVG sql function +// AVG("a") -> AVG("a") +// AVG(I("a")) -> AVG("a") +func AVG(col interface{}) exp.SQLFunctionExpression { return newIdentifierFunc("AVG", col) } + +// Creates a new FIRST sql function +// FIRST("a") -> FIRST("a") +// FIRST(I("a")) -> FIRST("a") +func FIRST(col interface{}) exp.SQLFunctionExpression { return newIdentifierFunc("FIRST", col) } + +// Creates a new LAST sql function +// LAST("a") -> LAST("a") +// LAST(I("a")) -> LAST("a") +func LAST(col interface{}) exp.SQLFunctionExpression { return newIdentifierFunc("LAST", col) } + +// Creates a new SUM sql function +// SUM("a") -> SUM("a") +// SUM(I("a")) -> SUM("a") +func SUM(col interface{}) exp.SQLFunctionExpression { return newIdentifierFunc("SUM", col) } + +// Creates a new COALESCE sql function +// COALESCE(I("a"), "a") -> COALESCE("a", 'a') +// COALESCE(I("a"), I("b"), nil) -> COALESCE("a", "b", NULL) +func COALESCE(vals ...interface{}) exp.SQLFunctionExpression { + return Func("COALESCE", vals...) 
+} + +//nolint:stylecheck,golint // sql function name +func ROW_NUMBER() exp.SQLFunctionExpression { + return Func("ROW_NUMBER") +} + +func RANK() exp.SQLFunctionExpression { + return Func("RANK") +} + +//nolint:stylecheck,golint // sql function name +func DENSE_RANK() exp.SQLFunctionExpression { + return Func("DENSE_RANK") +} + +//nolint:stylecheck,golint // sql function name +func PERCENT_RANK() exp.SQLFunctionExpression { + return Func("PERCENT_RANK") +} + +//nolint:stylecheck,golint //sql function name +func CUME_DIST() exp.SQLFunctionExpression { + return Func("CUME_DIST") +} + +func NTILE(n int) exp.SQLFunctionExpression { + return Func("NTILE", n) +} + +//nolint:stylecheck,golint //sql function name +func FIRST_VALUE(val interface{}) exp.SQLFunctionExpression { + return newIdentifierFunc("FIRST_VALUE", val) +} + +//nolint:stylecheck,golint //sql function name +func LAST_VALUE(val interface{}) exp.SQLFunctionExpression { + return newIdentifierFunc("LAST_VALUE", val) +} + +//nolint:stylecheck,golint //sql function name +func NTH_VALUE(val interface{}, nth int) exp.SQLFunctionExpression { + if s, ok := val.(string); ok { + val = I(s) + } + return Func("NTH_VALUE", val, nth) +} + +// Creates a new Identifier, the generated sql will use adapter specific quoting or '"' by default, this ensures case +// sensitivity and in certain databases allows for special characters, (e.g. "curr-table", "my table"). +// +// The identifier will be split by '.' +// +// Table and Column example +// I("table.column") -> "table"."column" //A Column and table +// Schema table and column +// I("schema.table.column") -> "schema"."table"."column" +// Table with star +// I("table.*") -> "table".* +func I(ident string) exp.IdentifierExpression { + return exp.ParseIdentifier(ident) +} + +// Creates a new Column Identifier, the generated sql will use adapter specific quoting or '"' by default, this ensures case +// sensitivity and in certain databases allows for special characters, (e.g. "curr-table", "my table"). +// An Identifier can represent a one or a combination of schema, table, and/or column. +// C("column") -> "column" //A Column +// C("column").Table("table") -> "table"."column" //A Column and table +// C("column").Table("table").Schema("schema") //Schema table and column +// C("*") //Also handles the * operator +func C(col string) exp.IdentifierExpression { + return exp.NewIdentifierExpression("", "", col) +} + +// Creates a new Schema Identifier, the generated sql will use adapter specific quoting or '"' by default, this ensures case +// sensitivity and in certain databases allows for special characters, (e.g. "curr-schema", "my schema"). +// S("schema") -> "schema" //A Schema +// S("schema").Table("table") -> "schema"."table" //A Schema and table +// S("schema").Table("table").Col("col") //Schema table and column +// S("schema").Table("table").Col("*") //Schema table and all columns +func S(schema string) exp.IdentifierExpression { + return exp.NewIdentifierExpression(schema, "", "") +} + +// Creates a new Table Identifier, the generated sql will use adapter specific quoting or '"' by default, this ensures case +// sensitivity and in certain databases allows for special characters, (e.g. "curr-table", "my table"). 
+// T("table") -> "table" //A Column +// T("table").Col("col") -> "table"."column" //A Column and table +// T("table").Schema("schema").Col("col) -> "schema"."table"."column" //Schema table and column +// T("table").Schema("schema").Col("*") -> "schema"."table".* //Also handles the * operator +func T(table string) exp.IdentifierExpression { + return exp.NewIdentifierExpression("", table, "") +} + +// Create a new WINDOW clause +// W() -> () +// W().PartitionBy("a") -> (PARTITION BY "a") +// W().PartitionBy("a").OrderBy("b") -> (PARTITION BY "a" ORDER BY "b") +// W().PartitionBy("a").OrderBy("b").Inherit("w1") -> ("w1" PARTITION BY "a" ORDER BY "b") +// W().PartitionBy("a").OrderBy(I("b").Desc()).Inherit("w1") -> ("w1" PARTITION BY "a" ORDER BY "b" DESC) +// W("w") -> "w" AS () +// W("w", "w1") -> "w" AS ("w1") +// W("w").Inherit("w1") -> "w" AS ("w1") +// W("w").PartitionBy("a") -> "w" AS (PARTITION BY "a") +// W("w", "w1").PartitionBy("a") -> "w" AS ("w1" PARTITION BY "a") +// W("w", "w1").PartitionBy("a").OrderBy("b") -> "w" AS ("w1" PARTITION BY "a" ORDER BY "b") +func W(ws ...string) exp.WindowExpression { + switch len(ws) { + case 0: + return emptyWindow + case 1: + return exp.NewWindowExpression(I(ws[0]), nil, nil, nil) + default: + return exp.NewWindowExpression(I(ws[0]), I(ws[1]), nil, nil) + } +} + +// Creates a new ON clause to be used within a join +// ds.Join(goqu.T("my_table"), goqu.On( +// goqu.I("my_table.fkey").Eq(goqu.I("other_table.id")), +// )) +func On(expressions ...exp.Expression) exp.JoinCondition { + return exp.NewJoinOnCondition(expressions...) +} + +// Creates a new USING clause to be used within a join +// ds.Join(goqu.T("my_table"), goqu.Using("fkey")) +func Using(columns ...interface{}) exp.JoinCondition { + return exp.NewJoinUsingCondition(columns...) +} + +// Creates a new SQL literal with the provided arguments. +// L("a = 1") -> a = 1 +// You can also you placeholders. All placeholders within a Literal are represented by '?' +// L("a = ?", "b") -> a = 'b' +// Literals can also contain placeholders for other expressions +// L("(? AND ?) OR (?)", I("a").Eq(1), I("b").Eq("b"), I("c").In([]string{"a", "b", "c"})) +func L(sql string, args ...interface{}) exp.LiteralExpression { + return Literal(sql, args...) +} + +// Alias for goqu.L +func Literal(sql string, args ...interface{}) exp.LiteralExpression { + return exp.NewLiteralExpression(sql, args...) +} + +// Create a new SQL value ( alias for goqu.L("?", val) ). The prrimary use case for this would be in selects. +// See examples. 
+func V(val interface{}) exp.LiteralExpression {
+	return exp.NewLiteralExpression("?", val)
+}
+
+// Creates a new Range to be used with a Between expression
+// exp.C("col").Between(exp.Range(1, 10))
+func Range(start, end interface{}) exp.RangeVal {
+	return exp.NewRangeVal(start, end)
+}
+
+// Creates a literal *
+func Star() exp.LiteralExpression { return exp.Star() }
+
+// Returns a literal for DEFAULT sql keyword
+func Default() exp.LiteralExpression {
+	return exp.Default()
+}
+
+func Lateral(table exp.AppendableExpression) exp.LateralExpression {
+	return exp.NewLateralExpression(table)
+}
+
+// Create a new ANY comparison
+func Any(val interface{}) exp.SQLFunctionExpression {
+	return Func("ANY ", val)
+}
+
+// Create a new ALL comparison
+func All(val interface{}) exp.SQLFunctionExpression {
+	return Func("ALL ", val)
+}
+
+func Case() exp.CaseExpression {
+	return exp.NewCaseExpression()
+}
diff --git a/vendor/github.com/doug-martin/goqu/v9/go.test.sh b/vendor/github.com/doug-martin/goqu/v9/go.test.sh
new file mode 100644
index 000000000..4d7be372e
--- /dev/null
+++ b/vendor/github.com/doug-martin/goqu/v9/go.test.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+set -e
+echo "" > coverage.txt
+
+go test -race -coverprofile=coverage.txt -coverpkg=./... ./...
\ No newline at end of file
diff --git a/vendor/github.com/doug-martin/goqu/v9/goqu.go b/vendor/github.com/doug-martin/goqu/v9/goqu.go
new file mode 100644
index 000000000..a2c0ec285
--- /dev/null
+++ b/vendor/github.com/doug-martin/goqu/v9/goqu.go
@@ -0,0 +1,86 @@
+/*
+goqu is an idiomatic SQL builder and query package.
+
+      __ _  ___   __ _ _   _
+     / _` |/ _ \ / _` | | | |
+    | (_| | (_) | (_| | |_| |
+     \__, |\___/ \__, |\__,_|
+     |___/          |_|
+
+
+Please see https://github.com/doug-martin/goqu for an introduction to goqu.
+*/
+package goqu
+
+import (
+	"time"
+
+	"github.com/doug-martin/goqu/v9/internal/util"
+	"github.com/doug-martin/goqu/v9/sqlgen"
+)
+
+type DialectWrapper struct {
+	dialect string
+}
+
+// Creates a new DialectWrapper to create goqu.Datasets or goqu.Databases with the specified dialect.
+func Dialect(dialect string) DialectWrapper {
+	return DialectWrapper{dialect: dialect}
+}
+
+// Create a new dataset for creating SELECT sql statements
+func (dw DialectWrapper) From(table ...interface{}) *SelectDataset {
+	return From(table...).WithDialect(dw.dialect)
+}
+
+// Create a new dataset for creating SELECT sql statements
+func (dw DialectWrapper) Select(cols ...interface{}) *SelectDataset {
+	return newDataset(dw.dialect, nil).Select(cols...)
+}
+
+// Create a new dataset for creating UPDATE sql statements
+func (dw DialectWrapper) Update(table interface{}) *UpdateDataset {
+	return Update(table).WithDialect(dw.dialect)
+}
+
+// Create a new dataset for creating INSERT sql statements
+func (dw DialectWrapper) Insert(table interface{}) *InsertDataset {
+	return Insert(table).WithDialect(dw.dialect)
+}
+
+// Create a new dataset for creating DELETE sql statements
+func (dw DialectWrapper) Delete(table interface{}) *DeleteDataset {
+	return Delete(table).WithDialect(dw.dialect)
+}
+
+// Create a new dataset for creating TRUNCATE sql statements
+func (dw DialectWrapper) Truncate(table ...interface{}) *TruncateDataset {
+	return Truncate(table...).WithDialect(dw.dialect)
+}
+
+func (dw DialectWrapper) DB(db SQLDatabase) *Database {
+	return newDatabase(dw.dialect, db)
+}
+
+func New(dialect string, db SQLDatabase) *Database {
+	return newDatabase(dialect, db)
+}
+
+// Set the behavior when encountering struct fields that do not have a db tag.
+// By default this is false; if set to true any field without a db tag will not
+// be targeted by Select or Scan operations.
+func SetIgnoreUntaggedFields(ignore bool) {
+	util.SetIgnoreUntaggedFields(ignore)
+}
+
+// Set the column rename function. This is used for struct fields that do not have a db tag to specify the column name.
+// By default all struct fields that do not have a db tag will be converted to lowercase.
+func SetColumnRenameFunction(renameFunc func(string) string) {
+	util.SetColumnRenameFunction(renameFunc)
+}
+
+// Set the location to use when interpolating time.Time instances. See https://golang.org/pkg/time/#LoadLocation
+// NOTE: This has no effect when using prepared statements.
+func SetTimeLocation(loc *time.Location) {
+	sqlgen.SetTimeLocation(loc)
+}
diff --git a/vendor/github.com/doug-martin/goqu/v9/insert_dataset.go b/vendor/github.com/doug-martin/goqu/v9/insert_dataset.go
new file mode 100644
index 000000000..5404ee82c
--- /dev/null
+++ b/vendor/github.com/doug-martin/goqu/v9/insert_dataset.go
@@ -0,0 +1,271 @@
+package goqu
+
+import (
+	"fmt"
+
+	"github.com/doug-martin/goqu/v9/exec"
+	"github.com/doug-martin/goqu/v9/exp"
+	"github.com/doug-martin/goqu/v9/internal/errors"
+	"github.com/doug-martin/goqu/v9/internal/sb"
+)
+
+type InsertDataset struct {
+	dialect      SQLDialect
+	clauses      exp.InsertClauses
+	isPrepared   prepared
+	queryFactory exec.QueryFactory
+	err          error
+}
+
+var ErrUnsupportedIntoType = errors.New("unsupported table type, a string or identifier expression is required")
+
+// used internally by database to create a dataset with a specific adapter
+func newInsertDataset(d string, queryFactory exec.QueryFactory) *InsertDataset {
+	return &InsertDataset{
+		clauses:      exp.NewInsertClauses(),
+		dialect:      GetDialect(d),
+		queryFactory: queryFactory,
+	}
+}
+
+// Creates a new InsertDataset for the provided table. Using this method will only allow you to create SQL;
+// use Database#From to create an InsertDataset with query capabilities.
+func Insert(table interface{}) *InsertDataset {
+	return newInsertDataset("default", nil).Into(table)
+}
+
+// Set the parameter interpolation behavior. See examples
+//
+// prepared: If true the dataset WILL NOT interpolate the parameters.
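The Prepared flag only changes how ToSQL renders parameters; nothing is sent to a database. A small sketch of the difference, with a hypothetical "items" table (not part of this diff):

    package main

    import (
    	"fmt"

    	"github.com/doug-martin/goqu/v9"
    )

    func main() {
    	ds := goqu.Insert("items").Rows(goqu.Record{"name": "widget", "price": 10})

    	// Default: values are interpolated into the SQL.
    	sql, args, _ := ds.ToSQL()
    	fmt.Println(sql, args)
    	// INSERT INTO "items" ("name", "price") VALUES ('widget', 10) []

    	// Prepared: placeholders are emitted and the values returned as args.
    	sql, args, _ = ds.Prepared(true).ToSQL()
    	fmt.Println(sql, args)
    	// INSERT INTO "items" ("name", "price") VALUES (?, ?) [widget 10]
    }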
+func (id *InsertDataset) Prepared(prepared bool) *InsertDataset {
+	ret := id.copy(id.clauses)
+	ret.isPrepared = preparedFromBool(prepared)
+	return ret
+}
+
+func (id *InsertDataset) IsPrepared() bool {
+	return id.isPrepared.Bool()
+}
+
+// Sets the adapter used to serialize values and create the SQL statement
+func (id *InsertDataset) WithDialect(dl string) *InsertDataset {
+	ds := id.copy(id.GetClauses())
+	ds.dialect = GetDialect(dl)
+	return ds
+}
+
+// Returns the current adapter on the dataset
+func (id *InsertDataset) Dialect() SQLDialect {
+	return id.dialect
+}
+
+// Sets the adapter on the dataset and returns the new dataset
+func (id *InsertDataset) SetDialect(dialect SQLDialect) *InsertDataset {
+	cd := id.copy(id.GetClauses())
+	cd.dialect = dialect
+	return cd
+}
+
+func (id *InsertDataset) Expression() exp.Expression {
+	return id
+}
+
+// Clones the dataset
+func (id *InsertDataset) Clone() exp.Expression {
+	return id.copy(id.clauses)
+}
+
+// Returns the current clauses on the dataset.
+func (id *InsertDataset) GetClauses() exp.InsertClauses {
+	return id.clauses
+}
+
+// used internally to copy the dataset
+func (id *InsertDataset) copy(clauses exp.InsertClauses) *InsertDataset {
+	return &InsertDataset{
+		dialect:      id.dialect,
+		clauses:      clauses,
+		isPrepared:   id.isPrepared,
+		queryFactory: id.queryFactory,
+		err:          id.err,
+	}
+}
+
+// Creates a WITH clause for a common table expression (CTE).
+//
+// The name will be available to SELECT from in the associated query; and can optionally
+// contain a list of column names "name(col1, col2, col3)".
+//
+// The name will refer to the results of the specified subquery.
+func (id *InsertDataset) With(name string, subquery exp.Expression) *InsertDataset {
+	return id.copy(id.clauses.CommonTablesAppend(exp.NewCommonTableExpression(false, name, subquery)))
+}
+
+// Creates a WITH RECURSIVE clause for a common table expression (CTE)
+//
+// The name will be available to SELECT from in the associated query; and must
+// contain a list of column names "name(col1, col2, col3)" for a recursive clause.
+//
+// The name will refer to the results of the specified subquery. The subquery for
+// a recursive query will always end with a UNION or UNION ALL with a clause that
+// refers to the CTE by name.
+func (id *InsertDataset) WithRecursive(name string, subquery exp.Expression) *InsertDataset {
+	return id.copy(id.clauses.CommonTablesAppend(exp.NewCommonTableExpression(true, name, subquery)))
+}
+
+// Sets the table to insert INTO. This returns a new dataset with the original table replaced. See examples.
+// You can pass in the following.
+// string: Will automatically be turned into an identifier
+// Expression: Any valid expression (IdentifierExpression, AliasedExpression, Literal, etc.)
+func (id *InsertDataset) Into(into interface{}) *InsertDataset {
+	switch t := into.(type) {
+	case exp.Expression:
+		return id.copy(id.clauses.SetInto(t))
+	case string:
+		return id.copy(id.clauses.SetInto(exp.ParseIdentifier(t)))
+	default:
+		panic(ErrUnsupportedIntoType)
+	}
+}
+
+// Sets the Columns to insert into
+func (id *InsertDataset) Cols(cols ...interface{}) *InsertDataset {
+	return id.copy(id.clauses.SetCols(exp.NewColumnListExpression(cols...)))
+}
+
+// Clears the Columns to insert into
+func (id *InsertDataset) ClearCols() *InsertDataset {
+	return id.copy(id.clauses.SetCols(nil))
+}
+
+// Adds columns to the current list of columns clause.
See examples +func (id *InsertDataset) ColsAppend(cols ...interface{}) *InsertDataset { + return id.copy(id.clauses.ColsAppend(exp.NewColumnListExpression(cols...))) +} + +// Adds a subquery to the insert. See examples. +func (id *InsertDataset) FromQuery(from exp.AppendableExpression) *InsertDataset { + if sds, ok := from.(*SelectDataset); ok { + if sds.dialect != GetDialect("default") && id.Dialect() != sds.dialect { + panic( + fmt.Errorf( + "incompatible dialects for INSERT (%q) and SELECT (%q)", + id.dialect.Dialect(), sds.dialect.Dialect(), + ), + ) + } + sds.dialect = id.dialect + } + return id.copy(id.clauses.SetFrom(from)) +} + +// Manually set values to insert See examples. +func (id *InsertDataset) Vals(vals ...[]interface{}) *InsertDataset { + return id.copy(id.clauses.ValsAppend(vals)) +} + +// Clears the values. See examples. +func (id *InsertDataset) ClearVals() *InsertDataset { + return id.copy(id.clauses.SetVals(nil)) +} + +// Insert rows. Rows can be a map, goqu.Record or struct. See examples. +func (id *InsertDataset) Rows(rows ...interface{}) *InsertDataset { + return id.copy(id.clauses.SetRows(rows)) +} + +// Clears the rows for this insert dataset. See examples. +func (id *InsertDataset) ClearRows() *InsertDataset { + return id.copy(id.clauses.SetRows(nil)) +} + +// Adds a RETURNING clause to the dataset if the adapter supports it See examples. +func (id *InsertDataset) Returning(returning ...interface{}) *InsertDataset { + return id.copy(id.clauses.SetReturning(exp.NewColumnListExpression(returning...))) +} + +// Adds an (ON CONFLICT/ON DUPLICATE KEY) clause to the dataset if the dialect supports it. See examples. +func (id *InsertDataset) OnConflict(conflict exp.ConflictExpression) *InsertDataset { + return id.copy(id.clauses.SetOnConflict(conflict)) +} + +// Clears the on conflict clause. See example +func (id *InsertDataset) ClearOnConflict() *InsertDataset { + return id.OnConflict(nil) +} + +// Get any error that has been set or nil if no error has been set. +func (id *InsertDataset) Error() error { + return id.err +} + +// Set an error on the dataset if one has not already been set. This error will be returned by a future call to Error +// or as part of ToSQL. This can be used by end users to record errors while building up queries without having to +// track those separately. +func (id *InsertDataset) SetError(err error) *InsertDataset { + if id.err == nil { + id.err = err + } + + return id +} + +// Generates the default INSERT statement. If Prepared has been called with true then the statement will not be +// interpolated. See examples. When using structs you may specify a column to be skipped in the insert, (e.g. id) by +// specifying a goqu tag with `skipinsert` +// type Item struct{ +// Id uint32 `db:"id" goqu:"skipinsert"` +// Name string `db:"name"` +// } +// +// rows: variable number arguments of either map[string]interface, Record, struct, or a single slice argument of the +// accepted types. +// +// Errors: +// * There is no INTO clause +// * Different row types passed in, all rows must be of the same type +// * Maps with different numbers of K/V pairs +// * Rows of different lengths, (i.e. 
(Record{"name": "a"}, Record{"name": "a", "age": 10}) +// * Error generating SQL +func (id *InsertDataset) ToSQL() (sql string, params []interface{}, err error) { + return id.insertSQLBuilder().ToSQL() +} + +// Appends this Dataset's INSERT statement to the SQLBuilder +// This is used internally when using inserts in CTEs +func (id *InsertDataset) AppendSQL(b sb.SQLBuilder) { + if id.err != nil { + b.SetError(id.err) + return + } + id.dialect.ToInsertSQL(b, id.GetClauses()) +} + +func (id *InsertDataset) GetAs() exp.IdentifierExpression { + return id.clauses.Alias() +} + +// Sets the alias for this dataset. This is typically used when using a Dataset as MySQL upsert +func (id *InsertDataset) As(alias string) *InsertDataset { + return id.copy(id.clauses.SetAlias(T(alias))) +} + +func (id *InsertDataset) ReturnsColumns() bool { + return id.clauses.HasReturning() +} + +// Generates the INSERT sql, and returns an QueryExecutor struct with the sql set to the INSERT statement +// db.Insert("test").Rows(Record{"name":"Bob"}).Executor().Exec() +// +func (id *InsertDataset) Executor() exec.QueryExecutor { + return id.queryFactory.FromSQLBuilder(id.insertSQLBuilder()) +} + +func (id *InsertDataset) insertSQLBuilder() sb.SQLBuilder { + buf := sb.NewSQLBuilder(id.isPrepared.Bool()) + if id.err != nil { + return buf.SetError(id.err) + } + id.dialect.ToInsertSQL(buf, id.clauses) + return buf +} diff --git a/vendor/github.com/doug-martin/goqu/v9/internal/errors/error.go b/vendor/github.com/doug-martin/goqu/v9/internal/errors/error.go new file mode 100644 index 000000000..d0cb8072f --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/internal/errors/error.go @@ -0,0 +1,19 @@ +package errors + +import "fmt" + +type Error struct { + err string +} + +func New(message string, args ...interface{}) error { + return Error{err: "goqu: " + fmt.Sprintf(message, args...)} +} + +func NewEncodeError(t interface{}) error { + return Error{err: "goqu_encode_error: " + fmt.Sprintf("Unable to encode value %+v", t)} +} + +func (e Error) Error() string { + return e.err +} diff --git a/vendor/github.com/doug-martin/goqu/v9/internal/sb/sql_builder.go b/vendor/github.com/doug-martin/goqu/v9/internal/sb/sql_builder.go new file mode 100644 index 000000000..e60e4572b --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/internal/sb/sql_builder.go @@ -0,0 +1,101 @@ +package sb + +import ( + "bytes" +) + +// Builder that is composed of a bytes.Buffer. 
It is used internally and by adapters to build SQL statements +type ( + SQLBuilder interface { + Error() error + SetError(err error) SQLBuilder + WriteArg(i ...interface{}) SQLBuilder + Write(p []byte) SQLBuilder + WriteStrings(ss ...string) SQLBuilder + WriteRunes(r ...rune) SQLBuilder + IsPrepared() bool + CurrentArgPosition() int + ToSQL() (sql string, args []interface{}, err error) + } + sqlBuilder struct { + buf *bytes.Buffer + // True if the sql should not be interpolated + isPrepared bool + // Current Number of arguments, used by adapters that need positional placeholders + currentArgPosition int + args []interface{} + err error + } +) + +func NewSQLBuilder(isPrepared bool) SQLBuilder { + return &sqlBuilder{ + buf: &bytes.Buffer{}, + isPrepared: isPrepared, + args: make([]interface{}, 0), + currentArgPosition: 1, + } +} + +func (b *sqlBuilder) Error() error { + return b.err +} + +func (b *sqlBuilder) SetError(err error) SQLBuilder { + if b.err == nil { + b.err = err + } + return b +} + +func (b *sqlBuilder) Write(bs []byte) SQLBuilder { + if b.err == nil { + b.buf.Write(bs) + } + return b +} + +func (b *sqlBuilder) WriteStrings(ss ...string) SQLBuilder { + if b.err == nil { + for _, s := range ss { + b.buf.WriteString(s) + } + } + return b +} + +func (b *sqlBuilder) WriteRunes(rs ...rune) SQLBuilder { + if b.err == nil { + for _, r := range rs { + b.buf.WriteRune(r) + } + } + return b +} + +// Returns true if the sql is a prepared statement +func (b *sqlBuilder) IsPrepared() bool { + return b.isPrepared +} + +// Returns true if the sql is a prepared statement +func (b *sqlBuilder) CurrentArgPosition() int { + return b.currentArgPosition +} + +// Adds an argument to the builder, used when IsPrepared is false +func (b *sqlBuilder) WriteArg(i ...interface{}) SQLBuilder { + if b.err == nil { + b.currentArgPosition += len(i) + b.args = append(b.args, i...) + } + return b +} + +// Returns the sql string, and arguments. +func (b *sqlBuilder) ToSQL() (sql string, args []interface{}, err error) { + if b.err != nil { + return sql, args, b.err + } + return b.buf.String(), b.args, nil +} diff --git a/vendor/github.com/doug-martin/goqu/v9/internal/tag/tags.go b/vendor/github.com/doug-martin/goqu/v9/internal/tag/tags.go new file mode 100644 index 000000000..03652e9e4 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/internal/tag/tags.go @@ -0,0 +1,51 @@ +package tag + +import ( + "reflect" + "strings" +) + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type Options string + +func New(tagName string, st reflect.StructTag) Options { + return Options(st.Get(tagName)) +} + +func (o Options) Values() []string { + if string(o) == "" { + return []string{} + } + return strings.Split(string(o), ",") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o Options) Contains(optionName string) bool { + if o.IsEmpty() { + return false + } + values := o.Values() + for _, s := range values { + if s == optionName { + return true + } + } + return false +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. 
+func (o Options) Equals(val string) bool { + if len(o) == 0 { + return false + } + return string(o) == val +} + +func (o Options) IsEmpty() bool { + return len(o) == 0 +} diff --git a/vendor/github.com/doug-martin/goqu/v9/internal/util/column_map.go b/vendor/github.com/doug-martin/goqu/v9/internal/util/column_map.go new file mode 100644 index 000000000..b82693419 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/internal/util/column_map.go @@ -0,0 +1,130 @@ +package util + +import ( + "reflect" + "sort" + "strings" + + "github.com/doug-martin/goqu/v9/internal/tag" +) + +type ( + ColumnData struct { + ColumnName string + FieldIndex []int + ShouldInsert bool + ShouldUpdate bool + DefaultIfEmpty bool + GoType reflect.Type + } + ColumnMap map[string]ColumnData +) + +func newColumnMap(t reflect.Type, fieldIndex []int, prefixes []string) ColumnMap { + cm, n := ColumnMap{}, t.NumField() + var subColMaps []ColumnMap + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Anonymous && (f.Type.Kind() == reflect.Struct || f.Type.Kind() == reflect.Ptr) { + goquTag := tag.New("db", f.Tag) + if !goquTag.Contains("-") { + subColMaps = append(subColMaps, getStructColumnMap(&f, fieldIndex, goquTag.Values(), prefixes)) + } + } else if f.PkgPath == "" { + dbTag := tag.New("db", f.Tag) + // if PkgPath is empty then it is an exported field + columnName := getColumnName(&f, dbTag) + if !shouldIgnoreField(dbTag) { + if !implementsScanner(f.Type) { + subCm := getStructColumnMap(&f, fieldIndex, []string{columnName}, prefixes) + if len(subCm) != 0 { + subColMaps = append(subColMaps, subCm) + continue + } + } + goquTag := tag.New("goqu", f.Tag) + columnName = strings.Join(append(prefixes, columnName), ".") + cm[columnName] = newColumnData(&f, columnName, fieldIndex, goquTag) + } + } + } + return cm.Merge(subColMaps) +} + +func (cm ColumnMap) Cols() []string { + structCols := make([]string, 0, len(cm)) + for key := range cm { + structCols = append(structCols, key) + } + sort.Strings(structCols) + return structCols +} + +func (cm ColumnMap) Merge(colMaps []ColumnMap) ColumnMap { + for _, subCm := range colMaps { + for key, val := range subCm { + if _, ok := cm[key]; !ok { + cm[key] = val + } + } + } + return cm +} + +func implementsScanner(t reflect.Type) bool { + if IsPointer(t.Kind()) { + t = t.Elem() + } + if reflect.PtrTo(t).Implements(scannerType) { + return true + } + if !IsStruct(t.Kind()) { + return true + } + + return false +} + +func newColumnData(f *reflect.StructField, columnName string, fieldIndex []int, goquTag tag.Options) ColumnData { + return ColumnData{ + ColumnName: columnName, + ShouldInsert: !goquTag.Contains(skipInsertTagName), + ShouldUpdate: !goquTag.Contains(skipUpdateTagName), + DefaultIfEmpty: goquTag.Contains(defaultIfEmptyTagName), + FieldIndex: concatFieldIndexes(fieldIndex, f.Index), + GoType: f.Type, + } +} + +func getStructColumnMap(f *reflect.StructField, fieldIndex []int, fieldNames, prefixes []string) ColumnMap { + subFieldIndexes := concatFieldIndexes(fieldIndex, f.Index) + subPrefixes := append(prefixes, fieldNames...) 
+ if f.Type.Kind() == reflect.Ptr { + return newColumnMap(f.Type.Elem(), subFieldIndexes, subPrefixes) + } + return newColumnMap(f.Type, subFieldIndexes, subPrefixes) +} + +func getColumnName(f *reflect.StructField, dbTag tag.Options) string { + if dbTag.IsEmpty() { + return columnRenameFunction(f.Name) + } + return dbTag.Values()[0] +} + +func shouldIgnoreField(dbTag tag.Options) bool { + if dbTag.Equals("-") { + return true + } else if dbTag.IsEmpty() && ignoreUntaggedFields { + return true + } + + return false +} + +// safely concat two fieldIndex slices into one. +func concatFieldIndexes(fieldIndexPath, fieldIndex []int) []int { + fieldIndexes := make([]int, 0, len(fieldIndexPath)+len(fieldIndex)) + fieldIndexes = append(fieldIndexes, fieldIndexPath...) + return append(fieldIndexes, fieldIndex...) +} diff --git a/vendor/github.com/doug-martin/goqu/v9/internal/util/reflect.go b/vendor/github.com/doug-martin/goqu/v9/internal/util/reflect.go new file mode 100644 index 000000000..bc18b8bd5 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/internal/util/reflect.go @@ -0,0 +1,220 @@ +package util + +import ( + "database/sql" + "reflect" + "strings" + "sync" + + "github.com/doug-martin/goqu/v9/internal/errors" +) + +const ( + skipUpdateTagName = "skipupdate" + skipInsertTagName = "skipinsert" + defaultIfEmptyTagName = "defaultifempty" +) + +var scannerType = reflect.TypeOf((*sql.Scanner)(nil)).Elem() + +func IsUint(k reflect.Kind) bool { + return (k == reflect.Uint) || + (k == reflect.Uint8) || + (k == reflect.Uint16) || + (k == reflect.Uint32) || + (k == reflect.Uint64) +} + +func IsInt(k reflect.Kind) bool { + return (k == reflect.Int) || + (k == reflect.Int8) || + (k == reflect.Int16) || + (k == reflect.Int32) || + (k == reflect.Int64) +} + +func IsFloat(k reflect.Kind) bool { + return (k == reflect.Float32) || + (k == reflect.Float64) +} + +func IsString(k reflect.Kind) bool { + return k == reflect.String +} + +func IsBool(k reflect.Kind) bool { + return k == reflect.Bool +} + +func IsSlice(k reflect.Kind) bool { + return k == reflect.Slice +} + +func IsStruct(k reflect.Kind) bool { + return k == reflect.Struct +} + +func IsInvalid(k reflect.Kind) bool { + return k == reflect.Invalid +} + +func IsPointer(k reflect.Kind) bool { + return k == reflect.Ptr +} + +func IsEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Invalid: + return true + default: + return false + } +} + +var ( + structMapCache = make(map[interface{}]ColumnMap) + structMapCacheLock = sync.Mutex{} +) + +var ( + DefaultColumnRenameFunction = strings.ToLower + columnRenameFunction = DefaultColumnRenameFunction + ignoreUntaggedFields = false +) + +func SetIgnoreUntaggedFields(ignore bool) { + // If the value here is changing, reset the struct map cache + if ignore != ignoreUntaggedFields { + ignoreUntaggedFields = ignore + + structMapCacheLock.Lock() + defer structMapCacheLock.Unlock() + + structMapCache = make(map[interface{}]ColumnMap) + } +} + +func SetColumnRenameFunction(newFunction func(string) string) { + 
columnRenameFunction = newFunction +} + +// GetSliceElementType returns the type for a slices elements. +func GetSliceElementType(val reflect.Value) reflect.Type { + elemType := val.Type().Elem() + if elemType.Kind() == reflect.Ptr { + elemType = elemType.Elem() + } + + return elemType +} + +// AppendSliceElement will append val to slice. Handles slice of pointers and +// not pointers. Val needs to be a pointer. +func AppendSliceElement(slice, val reflect.Value) { + if slice.Type().Elem().Kind() == reflect.Ptr { + slice.Set(reflect.Append(slice, val)) + } else { + slice.Set(reflect.Append(slice, reflect.Indirect(val))) + } +} + +func GetTypeInfo(i interface{}, val reflect.Value) (reflect.Type, reflect.Kind) { + var t reflect.Type + valKind := val.Kind() + if valKind == reflect.Slice { + if reflect.ValueOf(i).Kind() == reflect.Ptr { + t = reflect.TypeOf(i).Elem().Elem() + } else { + t = reflect.TypeOf(i).Elem() + } + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + valKind = t.Kind() + } else { + t = val.Type() + } + return t, valKind +} + +func SafeGetFieldByIndex(v reflect.Value, fieldIndex []int) (result reflect.Value, isAvailable bool) { + switch len(fieldIndex) { + case 0: + return v, true + case 1: + return v.FieldByIndex(fieldIndex), true + default: + if f := reflect.Indirect(v.Field(fieldIndex[0])); f.IsValid() { + return SafeGetFieldByIndex(f, fieldIndex[1:]) + } + } + return reflect.ValueOf(nil), false +} + +func SafeSetFieldByIndex(v reflect.Value, fieldIndex []int, src interface{}) (result reflect.Value) { + v = reflect.Indirect(v) + switch len(fieldIndex) { + case 0: + return v + case 1: + f := v.FieldByIndex(fieldIndex) + srcVal := reflect.ValueOf(src) + f.Set(reflect.Indirect(srcVal)) + default: + f := v.Field(fieldIndex[0]) + switch f.Kind() { + case reflect.Ptr: + s := f + if f.IsNil() || !f.IsValid() { + s = reflect.New(f.Type().Elem()) + f.Set(s) + } + SafeSetFieldByIndex(reflect.Indirect(s), fieldIndex[1:], src) + case reflect.Struct: + SafeSetFieldByIndex(f, fieldIndex[1:], src) + default: // use the original value + } + } + return v +} + +type rowData = map[string]interface{} + +// AssignStructVals will assign the data from rd to i. 
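The column map built above is what honors the `db` and `goqu` struct tags when goqu maps structs to columns. A hedged sketch of the tag behavior, using a hypothetical Item type and "items" table:

    package main

    import (
    	"fmt"

    	"github.com/doug-martin/goqu/v9"
    )

    type Item struct {
    	ID   int64  `db:"id" goqu:"skipinsert,skipupdate"` // mapped, but excluded from INSERT/UPDATE
    	Name string `db:"name"`
    	Note string `db:"-"` // ignored entirely
    }

    func main() {
    	sql, _, _ := goqu.Insert("items").Rows(Item{ID: 1, Name: "widget"}).ToSQL()
    	fmt.Println(sql)
    	// INSERT INTO "items" ("name") VALUES ('widget')
    }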
+func AssignStructVals(i interface{}, rd rowData, cm ColumnMap) { + val := reflect.Indirect(reflect.ValueOf(i)) + + for name, data := range cm { + src, ok := rd[name] + if ok { + SafeSetFieldByIndex(val, data.FieldIndex, src) + } + } +} + +func GetColumnMap(i interface{}) (ColumnMap, error) { + val := reflect.Indirect(reflect.ValueOf(i)) + t, valKind := GetTypeInfo(i, val) + if valKind != reflect.Struct { + return nil, errors.New("cannot scan into this type: %v", t) // #nosec + } + + structMapCacheLock.Lock() + defer structMapCacheLock.Unlock() + if _, ok := structMapCache[t]; !ok { + structMapCache[t] = newColumnMap(t, []int{}, []string{}) + } + return structMapCache[t], nil +} diff --git a/vendor/github.com/doug-martin/goqu/v9/internal/util/value_slice.go b/vendor/github.com/doug-martin/goqu/v9/internal/util/value_slice.go new file mode 100644 index 000000000..b013ed5ee --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/internal/util/value_slice.go @@ -0,0 +1,33 @@ +package util + +import ( + "fmt" + "reflect" + "sort" + "strings" +) + +type ValueSlice []reflect.Value + +func (vs ValueSlice) Len() int { return len(vs) } +func (vs ValueSlice) Less(i, j int) bool { return vs[i].String() < vs[j].String() } +func (vs ValueSlice) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } + +func (vs ValueSlice) Equal(other ValueSlice) bool { + sort.Sort(other) + for i, key := range vs { + if other[i].String() != key.String() { + return false + } + } + return true +} + +func (vs ValueSlice) String() string { + vals := make([]string, vs.Len()) + for i, key := range vs { + vals[i] = fmt.Sprintf(`"%s"`, key.String()) + } + sort.Strings(vals) + return fmt.Sprintf("[%s]", strings.Join(vals, ",")) +} diff --git a/vendor/github.com/doug-martin/goqu/v9/prepared.go b/vendor/github.com/doug-martin/goqu/v9/prepared.go new file mode 100644 index 000000000..796ce98e9 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/prepared.go @@ -0,0 +1,48 @@ +package goqu + +var ( + // defaultPrepared is controlled by SetDefaultPrepared + defaultPrepared bool +) + +type prepared int + +const ( + // zero value that defers to defaultPrepared + preparedNoPreference prepared = iota + + // explicitly enabled via Prepared(true) on a dataset + preparedEnabled + + // explicitly disabled via Prepared(false) on a dataset + preparedDisabled +) + +// Bool converts the ternary prepared state into a boolean. If the prepared +// state is preparedNoPreference, the value depends on the last value that +// SetDefaultPrepared was called with which is false by default. +func (p prepared) Bool() bool { + if p == preparedNoPreference { + return defaultPrepared + } else if p == preparedEnabled { + return true + } + + return false +} + +// preparedFromBool converts a bool from e.g. Prepared(true) into a prepared +// const. +func preparedFromBool(prepared bool) prepared { + if prepared { + return preparedEnabled + } + + return preparedDisabled +} + +// SetDefaultPrepared controls the default Prepared state of all datasets. If +// set to true, any new dataset will use prepared queries by default. 
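Because the prepared state is a ternary (no preference / enabled / disabled), the global default only applies to datasets that never called Prepared. A minimal sketch of that interaction:

    package main

    import (
    	"fmt"

    	"github.com/doug-martin/goqu/v9"
    )

    func main() {
    	goqu.SetDefaultPrepared(true)

    	// With no per-dataset preference, the global default now applies.
    	sql, args, _ := goqu.From("users").Where(goqu.C("id").Eq(1)).ToSQL()
    	fmt.Println(sql, args)
    	// SELECT * FROM "users" WHERE ("id" = ?) [1]

    	// An explicit Prepared(false) still overrides the default.
    	sql, args, _ = goqu.From("users").Where(goqu.C("id").Eq(1)).Prepared(false).ToSQL()
    	fmt.Println(sql, args)
    	// SELECT * FROM "users" WHERE ("id" = 1) []
    }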
+func SetDefaultPrepared(prepared bool) {
+	defaultPrepared = prepared
+}
diff --git a/vendor/github.com/doug-martin/goqu/v9/select_dataset.go b/vendor/github.com/doug-martin/goqu/v9/select_dataset.go
new file mode 100644
index 000000000..775c387df
--- /dev/null
+++ b/vendor/github.com/doug-martin/goqu/v9/select_dataset.go
@@ -0,0 +1,695 @@
+package goqu
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/doug-martin/goqu/v9/exec"
+	"github.com/doug-martin/goqu/v9/exp"
+	"github.com/doug-martin/goqu/v9/internal/errors"
+	"github.com/doug-martin/goqu/v9/internal/sb"
+)
+
+// Dataset for creating and/or executing SELECT SQL statements.
+type SelectDataset struct {
+	dialect      SQLDialect
+	clauses      exp.SelectClauses
+	isPrepared   prepared
+	queryFactory exec.QueryFactory
+	err          error
+}
+
+var ErrQueryFactoryNotFoundError = errors.New(
+	"unable to execute query did you use goqu.Database#From to create the dataset",
+)
+
+// used internally by database to create a dataset with a specific adapter
+func newDataset(d string, queryFactory exec.QueryFactory) *SelectDataset {
+	return &SelectDataset{
+		clauses:      exp.NewSelectClauses(),
+		dialect:      GetDialect(d),
+		queryFactory: queryFactory,
+	}
+}
+
+func From(table ...interface{}) *SelectDataset {
+	return newDataset("default", nil).From(table...)
+}
+
+func Select(cols ...interface{}) *SelectDataset {
+	return newDataset("default", nil).Select(cols...)
+}
+
+// Sets the adapter used to serialize values and create the SQL statement
+func (sd *SelectDataset) WithDialect(dl string) *SelectDataset {
+	ds := sd.copy(sd.GetClauses())
+	ds.dialect = GetDialect(dl)
+	return ds
+}
+
+// Set the parameter interpolation behavior. See examples
+//
+// prepared: If true the dataset WILL NOT interpolate the parameters.
+func (sd *SelectDataset) Prepared(prepared bool) *SelectDataset {
+	ret := sd.copy(sd.clauses)
+	ret.isPrepared = preparedFromBool(prepared)
+	return ret
+}
+
+func (sd *SelectDataset) IsPrepared() bool {
+	return sd.isPrepared.Bool()
+}
+
+// Returns the current adapter on the dataset
+func (sd *SelectDataset) Dialect() SQLDialect {
+	return sd.dialect
+}
+
+// Sets the adapter on the dataset and returns the new dataset
+func (sd *SelectDataset) SetDialect(dialect SQLDialect) *SelectDataset {
+	cd := sd.copy(sd.GetClauses())
+	cd.dialect = dialect
+	return cd
+}
+
+func (sd *SelectDataset) Expression() exp.Expression {
+	return sd
+}
+
+// Clones the dataset
+func (sd *SelectDataset) Clone() exp.Expression {
+	return sd.copy(sd.clauses)
+}
+
+// Returns the current clauses on the dataset.
+func (sd *SelectDataset) GetClauses() exp.SelectClauses {
+	return sd.clauses
+}
+
+// used internally to copy the dataset
+func (sd *SelectDataset) copy(clauses exp.SelectClauses) *SelectDataset {
+	return &SelectDataset{
+		dialect:      sd.dialect,
+		clauses:      clauses,
+		isPrepared:   sd.isPrepared,
+		queryFactory: sd.queryFactory,
+		err:          sd.err,
+	}
+}
+
+// Creates a new UpdateDataset using the FROM of this dataset. This method will also copy over the `WITH`, `WHERE`,
+// `ORDER`, and `LIMIT` clauses.
+func (sd *SelectDataset) Update() *UpdateDataset {
+	u := newUpdateDataset(sd.dialect.Dialect(), sd.queryFactory).
+		Prepared(sd.isPrepared.Bool())
+	if sd.clauses.HasSources() {
+		u = u.Table(sd.GetClauses().From().Columns()[0])
+	}
+	c := u.clauses
+	for _, ce := range sd.clauses.CommonTables() {
+		c = c.CommonTablesAppend(ce)
+	}
+	if sd.clauses.Where() != nil {
+		c = c.WhereAppend(sd.clauses.Where())
+	}
+	if sd.clauses.HasLimit() {
+		c = c.SetLimit(sd.clauses.Limit())
+	}
+	if sd.clauses.HasOrder() {
+		for _, oe := range sd.clauses.Order().Columns() {
+			c = c.OrderAppend(oe.(exp.OrderedExpression))
+		}
+	}
+	u.clauses = c
+	return u
+}
+
+// Creates a new InsertDataset using the FROM of this dataset. This method will also copy over the `WITH` clause to the
+// insert.
+func (sd *SelectDataset) Insert() *InsertDataset {
+	i := newInsertDataset(sd.dialect.Dialect(), sd.queryFactory).
+		Prepared(sd.isPrepared.Bool())
+	if sd.clauses.HasSources() {
+		i = i.Into(sd.GetClauses().From().Columns()[0])
+	}
+	c := i.clauses
+	for _, ce := range sd.clauses.CommonTables() {
+		c = c.CommonTablesAppend(ce)
+	}
+	i.clauses = c
+	return i
+}
+
+// Creates a new DeleteDataset using the FROM of this dataset. This method will also copy over the `WITH`, `WHERE`,
+// `ORDER`, and `LIMIT` clauses.
+func (sd *SelectDataset) Delete() *DeleteDataset {
+	d := newDeleteDataset(sd.dialect.Dialect(), sd.queryFactory).
+		Prepared(sd.isPrepared.Bool())
+	if sd.clauses.HasSources() {
+		d = d.From(sd.clauses.From().Columns()[0])
+	}
+	c := d.clauses
+	for _, ce := range sd.clauses.CommonTables() {
+		c = c.CommonTablesAppend(ce)
+	}
+	if sd.clauses.Where() != nil {
+		c = c.WhereAppend(sd.clauses.Where())
+	}
+	if sd.clauses.HasLimit() {
+		c = c.SetLimit(sd.clauses.Limit())
+	}
+	if sd.clauses.HasOrder() {
+		for _, oe := range sd.clauses.Order().Columns() {
+			c = c.OrderAppend(oe.(exp.OrderedExpression))
+		}
+	}
+	d.clauses = c
+	return d
+}
+
+// Creates a new TruncateDataset using the FROM of this dataset.
+func (sd *SelectDataset) Truncate() *TruncateDataset {
+	td := newTruncateDataset(sd.dialect.Dialect(), sd.queryFactory)
+	if sd.clauses.HasSources() {
+		td = td.Table(sd.clauses.From())
+	}
+	return td
+}
+
+// Creates a WITH clause for a common table expression (CTE).
+//
+// The name will be available to SELECT from in the associated query; and can optionally
+// contain a list of column names "name(col1, col2, col3)".
+//
+// The name will refer to the results of the specified subquery.
+func (sd *SelectDataset) With(name string, subquery exp.Expression) *SelectDataset {
+	return sd.copy(sd.clauses.CommonTablesAppend(exp.NewCommonTableExpression(false, name, subquery)))
+}
+
+// Creates a WITH RECURSIVE clause for a common table expression (CTE)
+//
+// The name will be available to SELECT from in the associated query; and must
+// contain a list of column names "name(col1, col2, col3)" for a recursive clause.
+//
+// The name will refer to the results of the specified subquery. The subquery for
+// a recursive query will always end with a UNION or UNION ALL with a clause that
+// refers to the CTE by name.
+func (sd *SelectDataset) WithRecursive(name string, subquery exp.Expression) *SelectDataset {
+	return sd.copy(sd.clauses.CommonTablesAppend(exp.NewCommonTableExpression(true, name, subquery)))
+}
+
+// Adds columns to the SELECT clause. See examples
+// You can pass in the following.
+// string: Will automatically be turned into an identifier
+// Dataset: Will use the SQL generated from that Dataset. If the dataset is aliased it will use that alias as the
+// column name.
+// LiteralExpression: (See Literal) Will use the literal SQL
+// SQLFunction: (See Func, MIN, MAX, COUNT....)
+// Struct: If passing in an instance of a struct, we will parse the struct for the column names to select.
+// See examples
+func (sd *SelectDataset) Select(selects ...interface{}) *SelectDataset {
+	if len(selects) == 0 {
+		return sd.ClearSelect()
+	}
+	return sd.copy(sd.clauses.SetSelect(exp.NewColumnListExpression(selects...)))
+}
+
+// Adds columns to the SELECT DISTINCT clause. See examples
+// You can pass in the following.
+// string: Will automatically be turned into an identifier
+// Dataset: Will use the SQL generated from that Dataset. If the dataset is aliased it will use that alias as the
+// column name.
+// LiteralExpression: (See Literal) Will use the literal SQL
+// SQLFunction: (See Func, MIN, MAX, COUNT....)
+// Struct: If passing in an instance of a struct, we will parse the struct for the column names to select.
+// See examples
+// Deprecated: Use Distinct() instead.
+func (sd *SelectDataset) SelectDistinct(selects ...interface{}) *SelectDataset {
+	if len(selects) == 0 {
+		cleared := sd.ClearSelect()
+		return cleared.copy(cleared.clauses.SetDistinct(nil))
+	}
+	return sd.copy(sd.clauses.SetSelect(exp.NewColumnListExpression(selects...)).SetDistinct(exp.NewColumnListExpression()))
+}
+
+// Resets to SELECT *. If SelectDistinct or Distinct was used, the returned Dataset will also have the DISTINCT
+// clause cleared. See examples.
+func (sd *SelectDataset) ClearSelect() *SelectDataset {
+	return sd.copy(sd.clauses.SetSelect(exp.NewColumnListExpression(exp.Star())).SetDistinct(nil))
+}
+
+// Adds columns to the SELECT clause. See examples
+// You can pass in the following.
+// string: Will automatically be turned into an identifier
+// Dataset: Will use the SQL generated from that Dataset. If the dataset is aliased it will use that alias as the
+// column name.
+// LiteralExpression: (See Literal) Will use the literal SQL
+// SQLFunction: (See Func, MIN, MAX, COUNT....)
+func (sd *SelectDataset) SelectAppend(selects ...interface{}) *SelectDataset {
+	return sd.copy(sd.clauses.SelectAppend(exp.NewColumnListExpression(selects...)))
+}
+
+func (sd *SelectDataset) Distinct(on ...interface{}) *SelectDataset {
+	return sd.copy(sd.clauses.SetDistinct(exp.NewColumnListExpression(on...)))
+}
+
+// Adds a FROM clause. This returns a new dataset with the original sources replaced. See examples.
+// You can pass in the following.
+// string: Will automatically be turned into an identifier
+// Dataset: Will be added as a sub select. If the Dataset is not aliased it will automatically be aliased
+// LiteralExpression: (See Literal) Will use the literal SQL
+func (sd *SelectDataset) From(from ...interface{}) *SelectDataset {
+	var sources []interface{}
+	numSources := 0
+	for _, source := range from {
+		if ds, ok := source.(*SelectDataset); ok && !ds.clauses.HasAlias() {
+			numSources++
+			sources = append(sources, ds.As(fmt.Sprintf("t%d", numSources)))
+		} else {
+			sources = append(sources, source)
+		}
+	}
+	return sd.copy(sd.clauses.SetFrom(exp.NewColumnListExpression(sources...)))
+}
+
+// Returns a new Dataset with the current one as a source. If the current Dataset is not aliased (See Dataset#As) then
+// it will automatically be aliased. See examples.
+func (sd *SelectDataset) FromSelf() *SelectDataset {
+	return sd.copy(exp.NewSelectClauses()).From(sd)
+}
+
+// Alias to InnerJoin. See examples.
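A brief illustrative sketch of the join methods that follow, pairing Join with the On and Using conditions defined earlier (table and column names are hypothetical):

    package main

    import (
    	"fmt"

    	"github.com/doug-martin/goqu/v9"
    )

    func main() {
    	ds := goqu.From("orders").
    		Join(goqu.T("users"),
    			goqu.On(goqu.I("orders.user_id").Eq(goqu.I("users.id")))).
    		LeftJoin(goqu.T("addresses"), goqu.Using("user_id"))
    	sql, _, _ := ds.ToSQL()
    	fmt.Println(sql)
    	// SELECT * FROM "orders"
    	// INNER JOIN "users" ON ("orders"."user_id" = "users"."id")
    	// LEFT JOIN "addresses" USING ("user_id")
    }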
+func (sd *SelectDataset) Join(table exp.Expression, condition exp.JoinCondition) *SelectDataset { + return sd.InnerJoin(table, condition) +} + +// Adds an INNER JOIN clause. See examples. +func (sd *SelectDataset) InnerJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset { + return sd.joinTable(exp.NewConditionedJoinExpression(exp.InnerJoinType, table, condition)) +} + +// Adds a FULL OUTER JOIN clause. See examples. +func (sd *SelectDataset) FullOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset { + return sd.joinTable(exp.NewConditionedJoinExpression(exp.FullOuterJoinType, table, condition)) +} + +// Adds a RIGHT OUTER JOIN clause. See examples. +func (sd *SelectDataset) RightOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset { + return sd.joinTable(exp.NewConditionedJoinExpression(exp.RightOuterJoinType, table, condition)) +} + +// Adds a LEFT OUTER JOIN clause. See examples. +func (sd *SelectDataset) LeftOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset { + return sd.joinTable(exp.NewConditionedJoinExpression(exp.LeftOuterJoinType, table, condition)) +} + +// Adds a FULL JOIN clause. See examples. +func (sd *SelectDataset) FullJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset { + return sd.joinTable(exp.NewConditionedJoinExpression(exp.FullJoinType, table, condition)) +} + +// Adds a RIGHT JOIN clause. See examples. +func (sd *SelectDataset) RightJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset { + return sd.joinTable(exp.NewConditionedJoinExpression(exp.RightJoinType, table, condition)) +} + +// Adds a LEFT JOIN clause. See examples. +func (sd *SelectDataset) LeftJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset { + return sd.joinTable(exp.NewConditionedJoinExpression(exp.LeftJoinType, table, condition)) +} + +// Adds a NATURAL JOIN clause. See examples. +func (sd *SelectDataset) NaturalJoin(table exp.Expression) *SelectDataset { + return sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalJoinType, table)) +} + +// Adds a NATURAL LEFT JOIN clause. See examples. +func (sd *SelectDataset) NaturalLeftJoin(table exp.Expression) *SelectDataset { + return sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalLeftJoinType, table)) +} + +// Adds a NATURAL RIGHT JOIN clause. See examples. +func (sd *SelectDataset) NaturalRightJoin(table exp.Expression) *SelectDataset { + return sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalRightJoinType, table)) +} + +// Adds a NATURAL FULL JOIN clause. See examples. +func (sd *SelectDataset) NaturalFullJoin(table exp.Expression) *SelectDataset { + return sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalFullJoinType, table)) +} + +// Adds a CROSS JOIN clause. See examples. +func (sd *SelectDataset) CrossJoin(table exp.Expression) *SelectDataset { + return sd.joinTable(exp.NewUnConditionedJoinExpression(exp.CrossJoinType, table)) +} + +// Joins this Datasets table with another +func (sd *SelectDataset) joinTable(join exp.JoinExpression) *SelectDataset { + return sd.copy(sd.clauses.JoinsAppend(join)) +} + +// Adds a WHERE clause. See examples. +func (sd *SelectDataset) Where(expressions ...exp.Expression) *SelectDataset { + return sd.copy(sd.clauses.WhereAppend(expressions...)) +} + +// Removes the WHERE clause. See examples. +func (sd *SelectDataset) ClearWhere() *SelectDataset { + return sd.copy(sd.clauses.ClearWhere()) +} + +// Adds a FOR UPDATE clause. See examples. 
+func (sd *SelectDataset) ForUpdate(waitOption exp.WaitOption, of ...exp.IdentifierExpression) *SelectDataset {
+	return sd.withLock(exp.ForUpdate, waitOption, of...)
+}
+
+// Adds a FOR NO KEY UPDATE clause. See examples.
+func (sd *SelectDataset) ForNoKeyUpdate(waitOption exp.WaitOption, of ...exp.IdentifierExpression) *SelectDataset {
+	return sd.withLock(exp.ForNoKeyUpdate, waitOption, of...)
+}
+
+// Adds a FOR KEY SHARE clause. See examples.
+func (sd *SelectDataset) ForKeyShare(waitOption exp.WaitOption, of ...exp.IdentifierExpression) *SelectDataset {
+	return sd.withLock(exp.ForKeyShare, waitOption, of...)
+}
+
+// Adds a FOR SHARE clause. See examples.
+func (sd *SelectDataset) ForShare(waitOption exp.WaitOption, of ...exp.IdentifierExpression) *SelectDataset {
+	return sd.withLock(exp.ForShare, waitOption, of...)
+}
+
+func (sd *SelectDataset) withLock(strength exp.LockStrength, option exp.WaitOption, of ...exp.IdentifierExpression) *SelectDataset {
+	return sd.copy(sd.clauses.SetLock(exp.NewLock(strength, option, of...)))
+}
+
+// Adds a GROUP BY clause. See examples.
+func (sd *SelectDataset) GroupBy(groupBy ...interface{}) *SelectDataset {
+	return sd.copy(sd.clauses.SetGroupBy(exp.NewColumnListExpression(groupBy...)))
+}
+
+// Adds more columns to the current GROUP BY clause. See examples.
+func (sd *SelectDataset) GroupByAppend(groupBy ...interface{}) *SelectDataset {
+	return sd.copy(sd.clauses.GroupByAppend(exp.NewColumnListExpression(groupBy...)))
+}
+
+// Adds a HAVING clause. See examples.
+func (sd *SelectDataset) Having(expressions ...exp.Expression) *SelectDataset {
+	return sd.copy(sd.clauses.HavingAppend(expressions...))
+}
+
+// Adds an ORDER BY clause. If the ORDER BY is currently set it replaces it. See examples.
+func (sd *SelectDataset) Order(order ...exp.OrderedExpression) *SelectDataset {
+	return sd.copy(sd.clauses.SetOrder(order...))
+}
+
+// Adds more columns to the current ORDER BY clause. If no order has been previously specified it is the same as
+// calling Order. See examples.
+func (sd *SelectDataset) OrderAppend(order ...exp.OrderedExpression) *SelectDataset {
+	return sd.copy(sd.clauses.OrderAppend(order...))
+}
+
+// Adds more columns to the beginning of the current ORDER BY clause. If no order has been previously specified it is
+// the same as calling Order. See examples.
+func (sd *SelectDataset) OrderPrepend(order ...exp.OrderedExpression) *SelectDataset {
+	return sd.copy(sd.clauses.OrderPrepend(order...))
+}
+
+// Removes the ORDER BY clause. See examples.
+func (sd *SelectDataset) ClearOrder() *SelectDataset {
+	return sd.copy(sd.clauses.ClearOrder())
+}
+
+// Adds a LIMIT clause. If the LIMIT is currently set it replaces it. See examples.
+func (sd *SelectDataset) Limit(limit uint) *SelectDataset {
+	if limit > 0 {
+		return sd.copy(sd.clauses.SetLimit(limit))
+	}
+	return sd.copy(sd.clauses.ClearLimit())
+}
+
+// Adds a LIMIT ALL clause. If the LIMIT is currently set it replaces it. See examples.
+func (sd *SelectDataset) LimitAll() *SelectDataset {
+	return sd.copy(sd.clauses.SetLimit(L("ALL")))
+}
+
+// Removes the LIMIT clause.
+func (sd *SelectDataset) ClearLimit() *SelectDataset {
+	return sd.copy(sd.clauses.ClearLimit())
+}
+
+// Adds an OFFSET clause. If the OFFSET is currently set it replaces it. See examples.
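+//
+// Illustrative usage (not part of the upstream source; assumes the default
+// dialect):
+//
+//	sql, _, _ := From("items").Offset(10).ToSQL()
+//	// SELECT * FROM "items" OFFSET 10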
+func (sd *SelectDataset) Offset(offset uint) *SelectDataset {
+	return sd.copy(sd.clauses.SetOffset(offset))
+}
+
+// Removes the OFFSET clause from the Dataset
+func (sd *SelectDataset) ClearOffset() *SelectDataset {
+	return sd.copy(sd.clauses.ClearOffset())
+}
+
+// Creates a UNION statement with another dataset.
+// If this or the other dataset has a limit or offset it will use that dataset as a subselect in the FROM clause.
+// See examples.
+func (sd *SelectDataset) Union(other *SelectDataset) *SelectDataset {
+	return sd.withCompound(exp.UnionCompoundType, other.CompoundFromSelf())
+}
+
+// Creates a UNION ALL statement with another dataset.
+// If this or the other dataset has a limit or offset it will use that dataset as a subselect in the FROM clause.
+// See examples.
+func (sd *SelectDataset) UnionAll(other *SelectDataset) *SelectDataset {
+	return sd.withCompound(exp.UnionAllCompoundType, other.CompoundFromSelf())
+}
+
+// Creates an INTERSECT statement with another dataset.
+// If this or the other dataset has a limit or offset it will use that dataset as a subselect in the FROM clause.
+// See examples.
+func (sd *SelectDataset) Intersect(other *SelectDataset) *SelectDataset {
+	return sd.withCompound(exp.IntersectCompoundType, other.CompoundFromSelf())
+}
+
+// Creates an INTERSECT ALL statement with another dataset.
+// If this or the other dataset has a limit or offset it will use that dataset as a subselect in the FROM clause.
+// See examples.
+func (sd *SelectDataset) IntersectAll(other *SelectDataset) *SelectDataset {
+	return sd.withCompound(exp.IntersectAllCompoundType, other.CompoundFromSelf())
+}
+
+func (sd *SelectDataset) withCompound(ct exp.CompoundType, other exp.AppendableExpression) *SelectDataset {
+	ce := exp.NewCompoundExpression(ct, other)
+	ret := sd.CompoundFromSelf()
+	ret.clauses = ret.clauses.CompoundsAppend(ce)
+	return ret
+}
+
+// Used internally to determine if the dataset needs to use itself as a source.
+// If the dataset has an order or limit it will select from itself
+func (sd *SelectDataset) CompoundFromSelf() *SelectDataset {
+	if sd.clauses.HasOrder() || sd.clauses.HasLimit() {
+		return sd.FromSelf()
+	}
+	return sd.copy(sd.clauses)
+}
+
+// Sets the alias for this dataset. This is typically used when using a Dataset as a subselect. See examples.
+func (sd *SelectDataset) As(alias string) *SelectDataset {
+	return sd.copy(sd.clauses.SetAlias(T(alias)))
+}
+
+// Returns the alias value as an identifier expression
+func (sd *SelectDataset) GetAs() exp.IdentifierExpression {
+	return sd.clauses.Alias()
+}
+
+// Sets the WINDOW clauses
+func (sd *SelectDataset) Window(ws ...exp.WindowExpression) *SelectDataset {
+	return sd.copy(sd.clauses.SetWindows(ws))
+}
+
+// Appends to the WINDOW clauses
+func (sd *SelectDataset) WindowAppend(ws ...exp.WindowExpression) *SelectDataset {
+	return sd.copy(sd.clauses.WindowsAppend(ws...))
+}
+
+// Removes the WINDOW clauses
+func (sd *SelectDataset) ClearWindow() *SelectDataset {
+	return sd.copy(sd.clauses.ClearWindows())
+}
+
+// Get any error that has been set or nil if no error has been set.
+func (sd *SelectDataset) Error() error {
+	return sd.err
+}
+
+// Set an error on the dataset if one has not already been set. This error will be returned by a future call to Error
+// or as part of ToSQL. This can be used by end users to record errors while building up queries without having to
+// track those separately.
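+//
+// Illustrative usage (not part of the upstream source; buildWhere is a
+// hypothetical helper returning (exp.Expression, error)):
+//
+//	ds := From("user")
+//	if cond, err := buildWhere(); err != nil {
+//		ds = ds.SetError(err) // reported later by Error() or ToSQL()
+//	} else {
+//		ds = ds.Where(cond)
+//	}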
+func (sd *SelectDataset) SetError(err error) *SelectDataset {
+	if sd.err == nil {
+		sd.err = err
+	}
+
+	return sd
+}
+
+// Generates a SELECT sql statement; if Prepared has been called with true then the parameters will not be interpolated.
+// See examples.
+//
+// Errors:
+// * There is an error generating the SQL
+func (sd *SelectDataset) ToSQL() (sql string, params []interface{}, err error) {
+	return sd.selectSQLBuilder().ToSQL()
+}
+
+// Generates the SELECT sql, and returns an Exec struct with the sql set to the SELECT statement
+//	db.From("test").Select("col").Executor()
+//
+// See Dataset#ToSQL for errors
+func (sd *SelectDataset) Executor() exec.QueryExecutor {
+	return sd.queryFactory.FromSQLBuilder(sd.selectSQLBuilder())
+}
+
+// Appends this Dataset's SELECT statement to the SQLBuilder
+// This is used internally for sub-selects by the dialect
+func (sd *SelectDataset) AppendSQL(b sb.SQLBuilder) {
+	if sd.err != nil {
+		b.SetError(sd.err)
+		return
+	}
+	sd.dialect.ToSelectSQL(b, sd.GetClauses())
+}
+
+func (sd *SelectDataset) ReturnsColumns() bool {
+	return true
+}
+
+// Generates the SELECT sql for this dataset and uses Exec#ScanStructs to scan the results into a slice of structs.
+//
+// ScanStructs will only select the columns that can be scanned into the struct unless you have explicitly selected
+// certain columns. See examples.
+//
+// i: A pointer to a slice of structs
+func (sd *SelectDataset) ScanStructs(i interface{}) error {
+	return sd.ScanStructsContext(context.Background(), i)
+}
+
+// Generates the SELECT sql for this dataset and uses Exec#ScanStructsContext to scan the results into a slice of
+// structs.
+//
+// ScanStructsContext will only select the columns that can be scanned into the struct unless you have explicitly
+// selected certain columns. See examples.
+//
+// i: A pointer to a slice of structs
+func (sd *SelectDataset) ScanStructsContext(ctx context.Context, i interface{}) error {
+	if sd.queryFactory == nil {
+		return ErrQueryFactoryNotFoundError
+	}
+	ds := sd
+	if sd.GetClauses().IsDefaultSelect() {
+		ds = sd.Select(i)
+	}
+	return ds.Executor().ScanStructsContext(ctx, i)
+}
+
+// Generates the SELECT sql for this dataset and uses Exec#ScanStruct to scan the result into a struct
+//
+// ScanStruct will only select the columns that can be scanned into the struct unless you have explicitly selected
+// certain columns. See examples.
+//
+// i: A pointer to a struct
+func (sd *SelectDataset) ScanStruct(i interface{}) (bool, error) {
+	return sd.ScanStructContext(context.Background(), i)
+}
+
+// Generates the SELECT sql for this dataset and uses Exec#ScanStructContext to scan the result into a struct
+//
+// ScanStructContext will only select the columns that can be scanned into the struct unless you have explicitly
+// selected certain columns. See examples.
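+//
+// Illustrative usage (not part of the upstream source; db is a *Database):
+//
+//	var u struct {
+//		ID int `db:"id"`
+//	}
+//	found, err := db.From("user").Where(Ex{"id": 1}).ScanStructContext(ctx, &u)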
+//
+// i: A pointer to a struct
+func (sd *SelectDataset) ScanStructContext(ctx context.Context, i interface{}) (bool, error) {
+	if sd.queryFactory == nil {
+		return false, ErrQueryFactoryNotFoundError
+	}
+	ds := sd
+	if sd.GetClauses().IsDefaultSelect() {
+		ds = sd.Select(i)
+	}
+	return ds.Limit(1).Executor().ScanStructContext(ctx, i)
+}
+
+// Generates the SELECT sql for this dataset and uses Exec#ScanVals to scan the results into a slice of primitive values
+//
+// i: A pointer to a slice of primitive values
+func (sd *SelectDataset) ScanVals(i interface{}) error {
+	return sd.ScanValsContext(context.Background(), i)
+}
+
+// Generates the SELECT sql for this dataset and uses Exec#ScanValsContext to scan the results into a slice of primitive
+// values
+//
+// i: A pointer to a slice of primitive values
+func (sd *SelectDataset) ScanValsContext(ctx context.Context, i interface{}) error {
+	if sd.queryFactory == nil {
+		return ErrQueryFactoryNotFoundError
+	}
+	return sd.Executor().ScanValsContext(ctx, i)
+}
+
+// Generates the SELECT sql for this dataset and uses Exec#ScanVal to scan the result into a primitive value
+//
+// i: A pointer to a primitive value
+func (sd *SelectDataset) ScanVal(i interface{}) (bool, error) {
+	return sd.ScanValContext(context.Background(), i)
+}
+
+// Generates the SELECT sql for this dataset and uses Exec#ScanValContext to scan the result into a primitive value
+//
+// i: A pointer to a primitive value
+func (sd *SelectDataset) ScanValContext(ctx context.Context, i interface{}) (bool, error) {
+	if sd.queryFactory == nil {
+		return false, ErrQueryFactoryNotFoundError
+	}
+	return sd.Limit(1).Executor().ScanValContext(ctx, i)
+}
+
+// Generates the SELECT COUNT(*) sql for this dataset and uses Exec#ScanVal to scan the result into an int64.
+func (sd *SelectDataset) Count() (int64, error) {
+	return sd.CountContext(context.Background())
+}
+
+// Generates the SELECT COUNT(*) sql for this dataset and uses Exec#ScanValContext to scan the result into an int64.
+func (sd *SelectDataset) CountContext(ctx context.Context) (int64, error) {
+	var count int64
+	_, err := sd.Select(COUNT(Star()).As("count")).ScanValContext(ctx, &count)
+	return count, err
+}
+
+// Generates the SELECT sql only selecting the passed in column and uses Exec#ScanVals to scan the result into a slice
+// of primitive values.
+//
+// i: A slice of primitive values
+//
+// col: The column to select when generating the SQL
+func (sd *SelectDataset) Pluck(i interface{}, col string) error {
+	return sd.PluckContext(context.Background(), i, col)
+}
+
+// Generates the SELECT sql only selecting the passed in column and uses Exec#ScanValsContext to scan the result into a
+// slice of primitive values.
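+//
+// Illustrative usage (not part of the upstream source; db is a *Database):
+//
+//	var names []string
+//	err := db.From("user").PluckContext(ctx, &names, "name")
+//	// SELECT "name" FROM "user"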
+//
+// i: A slice of primitive values
+//
+// col: The column to select when generating the SQL
+func (sd *SelectDataset) PluckContext(ctx context.Context, i interface{}, col string) error {
+	return sd.Select(col).ScanValsContext(ctx, i)
+}
+
+func (sd *SelectDataset) selectSQLBuilder() sb.SQLBuilder {
+	buf := sb.NewSQLBuilder(sd.isPrepared.Bool())
+	if sd.err != nil {
+		return buf.SetError(sd.err)
+	}
+	sd.dialect.ToSelectSQL(buf, sd.GetClauses())
+	return buf
+}
diff --git a/vendor/github.com/doug-martin/goqu/v9/sql_dialect.go b/vendor/github.com/doug-martin/goqu/v9/sql_dialect.go
new file mode 100644
index 000000000..796827411
--- /dev/null
+++ b/vendor/github.com/doug-martin/goqu/v9/sql_dialect.go
@@ -0,0 +1,103 @@
+package goqu
+
+import (
+	"strings"
+	"sync"
+
+	"github.com/doug-martin/goqu/v9/exp"
+	"github.com/doug-martin/goqu/v9/internal/sb"
+	"github.com/doug-martin/goqu/v9/sqlgen"
+)
+
+type (
+	SQLDialectOptions = sqlgen.SQLDialectOptions
+	// An adapter interface to be used by a Dataset to generate SQL for a specific dialect.
+	// See DefaultAdapter for a concrete implementation and examples.
+	SQLDialect interface {
+		Dialect() string
+		ToSelectSQL(b sb.SQLBuilder, clauses exp.SelectClauses)
+		ToUpdateSQL(b sb.SQLBuilder, clauses exp.UpdateClauses)
+		ToInsertSQL(b sb.SQLBuilder, clauses exp.InsertClauses)
+		ToDeleteSQL(b sb.SQLBuilder, clauses exp.DeleteClauses)
+		ToTruncateSQL(b sb.SQLBuilder, clauses exp.TruncateClauses)
+	}
+	// The default adapter. This class should be used when building a new adapter. When creating a new adapter you can
+	// either override methods, or more typically update default values.
+	// See (github.com/doug-martin/goqu/dialect/postgres)
+	sqlDialect struct {
+		dialect        string
+		dialectOptions *SQLDialectOptions
+		selectGen      sqlgen.SelectSQLGenerator
+		updateGen      sqlgen.UpdateSQLGenerator
+		insertGen      sqlgen.InsertSQLGenerator
+		deleteGen      sqlgen.DeleteSQLGenerator
+		truncateGen    sqlgen.TruncateSQLGenerator
+	}
+)
+
+var (
+	dialects              = make(map[string]SQLDialect)
+	DefaultDialectOptions = sqlgen.DefaultDialectOptions
+	dialectsMu            sync.RWMutex
+)
+
+func init() {
+	RegisterDialect("default", DefaultDialectOptions())
+}
+
+func RegisterDialect(name string, do *SQLDialectOptions) {
+	dialectsMu.Lock()
+	defer dialectsMu.Unlock()
+	lowerName := strings.ToLower(name)
+	dialects[lowerName] = newDialect(lowerName, do)
+}
+
+func DeregisterDialect(name string) {
+	dialectsMu.Lock()
+	defer dialectsMu.Unlock()
+	delete(dialects, strings.ToLower(name))
+}
+
+func GetDialect(name string) SQLDialect {
+	name = strings.ToLower(name)
+	if d, ok := dialects[name]; ok {
+		return d
+	}
+	return newDialect("default", DefaultDialectOptions())
+}
+
+func newDialect(dialect string, do *SQLDialectOptions) SQLDialect {
+	return &sqlDialect{
+		dialect:        dialect,
+		dialectOptions: do,
+		selectGen:      sqlgen.NewSelectSQLGenerator(dialect, do),
+		updateGen:      sqlgen.NewUpdateSQLGenerator(dialect, do),
+		insertGen:      sqlgen.NewInsertSQLGenerator(dialect, do),
+		deleteGen:      sqlgen.NewDeleteSQLGenerator(dialect, do),
+		truncateGen:    sqlgen.NewTruncateSQLGenerator(dialect, do),
+	}
+}
+
+func (d *sqlDialect) Dialect() string {
+	return d.dialect
+}
+
+func (d *sqlDialect) ToSelectSQL(b sb.SQLBuilder, clauses exp.SelectClauses) {
+	d.selectGen.Generate(b, clauses)
+}
+
+func (d *sqlDialect) ToUpdateSQL(b sb.SQLBuilder, clauses exp.UpdateClauses) {
+	d.updateGen.Generate(b, clauses)
+}
+
+func (d *sqlDialect) ToInsertSQL(b sb.SQLBuilder, clauses exp.InsertClauses) {
+	d.insertGen.Generate(b, clauses)
+}
+
+func (d *sqlDialect) ToDeleteSQL(b sb.SQLBuilder, clauses exp.DeleteClauses) {
+	d.deleteGen.Generate(b, clauses)
+}
+
+func (d *sqlDialect) ToTruncateSQL(b sb.SQLBuilder, clauses exp.TruncateClauses) {
+	d.truncateGen.Generate(b, clauses)
+}
diff --git a/vendor/github.com/doug-martin/goqu/v9/sqlgen/common_sql_generator.go b/vendor/github.com/doug-martin/goqu/v9/sqlgen/common_sql_generator.go
new file mode 100644
index 000000000..b54a87019
--- /dev/null
+++ b/vendor/github.com/doug-martin/goqu/v9/sqlgen/common_sql_generator.go
@@ -0,0 +1,155 @@
+package sqlgen
+
+import (
+	"github.com/doug-martin/goqu/v9/exp"
+	"github.com/doug-martin/goqu/v9/internal/errors"
+	"github.com/doug-martin/goqu/v9/internal/sb"
+)
+
+var ErrNoUpdatedValuesProvided = errors.New("no update values provided")
+
+func ErrCTENotSupported(dialect string) error {
+	return errors.New("dialect does not support CTE WITH clause [dialect=%s]", dialect)
+}
+
+func ErrRecursiveCTENotSupported(dialect string) error {
+	return errors.New("dialect does not support CTE WITH RECURSIVE clause [dialect=%s]", dialect)
+}
+
+func ErrReturnNotSupported(dialect string) error {
+	return errors.New("dialect does not support RETURNING clause [dialect=%s]", dialect)
+}
+
+func ErrNotSupportedFragment(sqlType string, f SQLFragmentType) error {
+	return errors.New("unsupported %s SQL fragment %s", sqlType, f)
+}
+
+type (
+	CommonSQLGenerator interface {
+		Dialect() string
+		DialectOptions() *SQLDialectOptions
+		ExpressionSQLGenerator() ExpressionSQLGenerator
+		ReturningSQL(b sb.SQLBuilder, returns exp.ColumnListExpression)
+		FromSQL(b sb.SQLBuilder, from exp.ColumnListExpression)
+		SourcesSQL(b sb.SQLBuilder, from exp.ColumnListExpression)
+		WhereSQL(b sb.SQLBuilder, where exp.ExpressionList)
+		OrderSQL(b sb.SQLBuilder, order exp.ColumnListExpression)
+		OrderWithOffsetFetchSQL(b sb.SQLBuilder, order exp.ColumnListExpression, offset uint, limit interface{})
+		LimitSQL(b sb.SQLBuilder, limit interface{})
+		UpdateExpressionSQL(b sb.SQLBuilder, updates ...exp.UpdateExpression)
+	}
+	commonSQLGenerator struct {
+		dialect        string
+		esg            ExpressionSQLGenerator
+		dialectOptions *SQLDialectOptions
+	}
+)
+
+func NewCommonSQLGenerator(dialect string, do *SQLDialectOptions) CommonSQLGenerator {
+	return &commonSQLGenerator{dialect: dialect, esg: NewExpressionSQLGenerator(dialect, do), dialectOptions: do}
+}
+
+func (csg *commonSQLGenerator) Dialect() string {
+	return csg.dialect
+}
+
+func (csg *commonSQLGenerator) DialectOptions() *SQLDialectOptions {
+	return csg.dialectOptions
+}
+
+func (csg *commonSQLGenerator) ExpressionSQLGenerator() ExpressionSQLGenerator {
+	return csg.esg
+}
+
+func (csg *commonSQLGenerator) ReturningSQL(b sb.SQLBuilder, returns exp.ColumnListExpression) {
+	if returns != nil && len(returns.Columns()) > 0 {
+		if csg.dialectOptions.SupportsReturn {
+			b.Write(csg.dialectOptions.ReturningFragment)
+			csg.esg.Generate(b, returns)
+		} else {
+			b.SetError(ErrReturnNotSupported(csg.dialect))
+		}
+	}
+}
+
+// Adds the FROM clause and tables to an sql statement
+func (csg *commonSQLGenerator) FromSQL(b sb.SQLBuilder, from exp.ColumnListExpression) {
+	if from != nil && !from.IsEmpty() {
+		b.Write(csg.dialectOptions.FromFragment)
+		csg.SourcesSQL(b, from)
+	}
+}
+
+// Generates the SQL for a column list of sources
+func (csg *commonSQLGenerator) SourcesSQL(b sb.SQLBuilder, from exp.ColumnListExpression) {
+	b.WriteRunes(csg.dialectOptions.SpaceRune)
+	csg.esg.Generate(b, from)
+}
+
+// Generates the WHERE clause for an SQL
statement +func (csg *commonSQLGenerator) WhereSQL(b sb.SQLBuilder, where exp.ExpressionList) { + if where != nil && !where.IsEmpty() { + b.Write(csg.dialectOptions.WhereFragment) + csg.esg.Generate(b, where) + } +} + +// Generates the ORDER BY clause for an SQL statement +func (csg *commonSQLGenerator) OrderSQL(b sb.SQLBuilder, order exp.ColumnListExpression) { + if order != nil && len(order.Columns()) > 0 { + b.Write(csg.dialectOptions.OrderByFragment) + csg.esg.Generate(b, order) + } +} + +func (csg *commonSQLGenerator) OrderWithOffsetFetchSQL( + b sb.SQLBuilder, + order exp.ColumnListExpression, + offset uint, + limit interface{}, +) { + if order == nil { + return + } + + csg.OrderSQL(b, order) + if offset > 0 { + b.Write(csg.dialectOptions.OffsetFragment) + csg.esg.Generate(b, offset) + b.Write([]byte(" ROWS")) + + if limit != nil { + b.Write(csg.dialectOptions.FetchFragment) + csg.esg.Generate(b, limit) + b.Write([]byte(" ROWS ONLY")) + } + } +} + +// Generates the LIMIT clause for an SQL statement +func (csg *commonSQLGenerator) LimitSQL(b sb.SQLBuilder, limit interface{}) { + if limit != nil { + b.Write(csg.dialectOptions.LimitFragment) + if csg.dialectOptions.SurroundLimitWithParentheses { + b.WriteRunes(csg.dialectOptions.LeftParenRune) + } + csg.esg.Generate(b, limit) + if csg.dialectOptions.SurroundLimitWithParentheses { + b.WriteRunes(csg.dialectOptions.RightParenRune) + } + } +} + +func (csg *commonSQLGenerator) UpdateExpressionSQL(b sb.SQLBuilder, updates ...exp.UpdateExpression) { + if len(updates) == 0 { + b.SetError(ErrNoUpdatedValuesProvided) + return + } + updateLen := len(updates) + for i, update := range updates { + csg.esg.Generate(b, update) + if i < updateLen-1 { + b.WriteRunes(csg.dialectOptions.CommaRune) + } + } +} diff --git a/vendor/github.com/doug-martin/goqu/v9/sqlgen/delete_sql_generator.go b/vendor/github.com/doug-martin/goqu/v9/sqlgen/delete_sql_generator.go new file mode 100644 index 000000000..107708a28 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/sqlgen/delete_sql_generator.go @@ -0,0 +1,72 @@ +package sqlgen + +import ( + "github.com/doug-martin/goqu/v9/exp" + "github.com/doug-martin/goqu/v9/internal/errors" + "github.com/doug-martin/goqu/v9/internal/sb" +) + +type ( + // An adapter interface to be used by a Dataset to generate SQL for a specific dialect. + // See DefaultAdapter for a concrete implementation and examples. + DeleteSQLGenerator interface { + Dialect() string + Generate(b sb.SQLBuilder, clauses exp.DeleteClauses) + } + // The default adapter. This class should be used when building a new adapter. When creating a new adapter you can + // either override methods, or more typically update default values. 
+	// See (github.com/doug-martin/goqu/dialect/postgres)
+	deleteSQLGenerator struct {
+		CommonSQLGenerator
+	}
+)
+
+var ErrNoSourceForDelete = errors.New("no source found when generating delete sql")
+
+func NewDeleteSQLGenerator(dialect string, do *SQLDialectOptions) DeleteSQLGenerator {
+	return &deleteSQLGenerator{NewCommonSQLGenerator(dialect, do)}
+}
+
+func (dsg *deleteSQLGenerator) Generate(b sb.SQLBuilder, clauses exp.DeleteClauses) {
+	if !clauses.HasFrom() {
+		b.SetError(ErrNoSourceForDelete)
+		return
+	}
+	for _, f := range dsg.DialectOptions().DeleteSQLOrder {
+		if b.Error() != nil {
+			return
+		}
+		switch f {
+		case CommonTableSQLFragment:
+			dsg.ExpressionSQLGenerator().Generate(b, clauses.CommonTables())
+		case DeleteBeginSQLFragment:
+			dsg.DeleteBeginSQL(
+				b, exp.NewColumnListExpression(clauses.From()), !(clauses.HasLimit() || clauses.HasOrder()),
+			)
+		case FromSQLFragment:
+			dsg.FromSQL(b, exp.NewColumnListExpression(clauses.From()))
+		case WhereSQLFragment:
+			dsg.WhereSQL(b, clauses.Where())
+		case OrderSQLFragment:
+			if dsg.DialectOptions().SupportsOrderByOnDelete {
+				dsg.OrderSQL(b, clauses.Order())
+			}
+		case LimitSQLFragment:
+			if dsg.DialectOptions().SupportsLimitOnDelete {
+				dsg.LimitSQL(b, clauses.Limit())
+			}
+		case ReturningSQLFragment:
+			dsg.ReturningSQL(b, clauses.Returning())
+		default:
+			b.SetError(ErrNotSupportedFragment("DELETE", f))
+		}
+	}
+}
+
+// Adds the correct fragment to begin a DELETE statement
+func (dsg *deleteSQLGenerator) DeleteBeginSQL(b sb.SQLBuilder, from exp.ColumnListExpression, multiTable bool) {
+	b.Write(dsg.DialectOptions().DeleteClause)
+	if multiTable && dsg.DialectOptions().SupportsDeleteTableHint {
+		dsg.SourcesSQL(b, from)
+	}
+}
diff --git a/vendor/github.com/doug-martin/goqu/v9/sqlgen/expression_sql_generator.go b/vendor/github.com/doug-martin/goqu/v9/sqlgen/expression_sql_generator.go
new file mode 100644
index 000000000..82ce15c58
--- /dev/null
+++ b/vendor/github.com/doug-martin/goqu/v9/sqlgen/expression_sql_generator.go
@@ -0,0 +1,733 @@
+package sqlgen
+
+import (
+	"database/sql/driver"
+	"reflect"
+	"strconv"
+	"time"
+	"unicode/utf8"
+
+	"github.com/doug-martin/goqu/v9/exp"
+	"github.com/doug-martin/goqu/v9/internal/errors"
+	"github.com/doug-martin/goqu/v9/internal/sb"
+	"github.com/doug-martin/goqu/v9/internal/util"
+)
+
+type (
+	// An adapter interface to be used by a Dataset to generate SQL for a specific dialect.
+	// See DefaultAdapter for a concrete implementation and examples.
+	ExpressionSQLGenerator interface {
+		Dialect() string
+		Generate(b sb.SQLBuilder, val interface{})
+	}
+	// The default adapter. This class should be used when building a new adapter. When creating a new adapter you can
+	// either override methods, or more typically update default values.
+	// See (github.com/doug-martin/goqu/dialect/postgres)
+	expressionSQLGenerator struct {
+		dialect        string
+		dialectOptions *SQLDialectOptions
+	}
+)
+
+var (
+	replacementRune = '?'
+ TrueLiteral = exp.NewLiteralExpression("TRUE") + FalseLiteral = exp.NewLiteralExpression("FALSE") + + ErrEmptyIdentifier = errors.New( + `a empty identifier was encountered, please specify a "schema", "table" or "column"`, + ) + ErrUnexpectedNamedWindow = errors.New(`unexpected named window function`) + ErrEmptyCaseWhens = errors.New(`when conditions not found for case statement`) +) + +func errUnsupportedExpressionType(e exp.Expression) error { + return errors.New("unsupported expression type %T", e) +} + +func errUnsupportedIdentifierExpression(t interface{}) error { + return errors.New("unexpected col type must be string or LiteralExpression received %T", t) +} + +func errUnsupportedBooleanExpressionOperator(op exp.BooleanOperation) error { + return errors.New("boolean operator '%+v' not supported", op) +} + +func errUnsupportedBitwiseExpressionOperator(op exp.BitwiseOperation) error { + return errors.New("bitwise operator '%+v' not supported", op) +} + +func errUnsupportedRangeExpressionOperator(op exp.RangeOperation) error { + return errors.New("range operator %+v not supported", op) +} + +func errLateralNotSupported(dialect string) error { + return errors.New("dialect does not support lateral expressions [dialect=%s]", dialect) +} + +func NewExpressionSQLGenerator(dialect string, do *SQLDialectOptions) ExpressionSQLGenerator { + return &expressionSQLGenerator{dialect: dialect, dialectOptions: do} +} + +func (esg *expressionSQLGenerator) Dialect() string { + return esg.dialect +} + +var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem() + +func (esg *expressionSQLGenerator) Generate(b sb.SQLBuilder, val interface{}) { + if b.Error() != nil { + return + } + if val == nil { + esg.literalNil(b) + return + } + switch v := val.(type) { + case exp.Expression: + esg.expressionSQL(b, v) + case int: + esg.literalInt(b, int64(v)) + case int32: + esg.literalInt(b, int64(v)) + case int64: + esg.literalInt(b, v) + case float32: + esg.literalFloat(b, float64(v)) + case float64: + esg.literalFloat(b, v) + case string: + esg.literalString(b, v) + case bool: + esg.literalBool(b, v) + case time.Time: + esg.literalTime(b, v) + case *time.Time: + if v == nil { + esg.literalNil(b) + return + } + esg.literalTime(b, *v) + case driver.Valuer: + // See https://github.com/golang/go/commit/0ce1d79a6a771f7449ec493b993ed2a720917870 + if rv := reflect.ValueOf(val); rv.Kind() == reflect.Ptr && + rv.IsNil() && + rv.Type().Elem().Implements(valuerReflectType) { + esg.literalNil(b) + return + } + dVal, err := v.Value() + if err != nil { + b.SetError(err) + return + } + esg.Generate(b, dVal) + default: + esg.reflectSQL(b, val) + } +} + +func (esg *expressionSQLGenerator) reflectSQL(b sb.SQLBuilder, val interface{}) { + v := reflect.Indirect(reflect.ValueOf(val)) + valKind := v.Kind() + switch { + case util.IsInvalid(valKind): + esg.literalNil(b) + case util.IsSlice(valKind): + switch t := val.(type) { + case []byte: + esg.literalBytes(b, t) + case []exp.CommonTableExpression: + esg.commonTablesSliceSQL(b, t) + default: + esg.sliceValueSQL(b, v) + } + case util.IsInt(valKind): + esg.Generate(b, v.Int()) + case util.IsUint(valKind): + esg.Generate(b, int64(v.Uint())) + case util.IsFloat(valKind): + esg.Generate(b, v.Float()) + case util.IsString(valKind): + esg.Generate(b, v.String()) + case util.IsBool(valKind): + esg.Generate(b, v.Bool()) + default: + b.SetError(errors.NewEncodeError(val)) + } +} + +// nolint:gocyclo // not complex just long +func (esg *expressionSQLGenerator) expressionSQL(b 
sb.SQLBuilder, expression exp.Expression) {
+	switch e := expression.(type) {
+	case exp.ColumnListExpression:
+		esg.columnListSQL(b, e)
+	case exp.ExpressionList:
+		esg.expressionListSQL(b, e)
+	case exp.LiteralExpression:
+		esg.literalExpressionSQL(b, e)
+	case exp.IdentifierExpression:
+		esg.identifierExpressionSQL(b, e)
+	case exp.LateralExpression:
+		esg.lateralExpressionSQL(b, e)
+	case exp.AliasedExpression:
+		esg.aliasedExpressionSQL(b, e)
+	case exp.BooleanExpression:
+		esg.booleanExpressionSQL(b, e)
+	case exp.BitwiseExpression:
+		esg.bitwiseExpressionSQL(b, e)
+	case exp.RangeExpression:
+		esg.rangeExpressionSQL(b, e)
+	case exp.OrderedExpression:
+		esg.orderedExpressionSQL(b, e)
+	case exp.UpdateExpression:
+		esg.updateExpressionSQL(b, e)
+	case exp.SQLFunctionExpression:
+		esg.sqlFunctionExpressionSQL(b, e)
+	case exp.SQLWindowFunctionExpression:
+		esg.sqlWindowFunctionExpression(b, e)
+	case exp.WindowExpression:
+		esg.windowExpressionSQL(b, e)
+	case exp.CastExpression:
+		esg.castExpressionSQL(b, e)
+	case exp.AppendableExpression:
+		esg.appendableExpressionSQL(b, e)
+	case exp.CommonTableExpression:
+		esg.commonTableExpressionSQL(b, e)
+	case exp.CompoundExpression:
+		esg.compoundExpressionSQL(b, e)
+	case exp.CaseExpression:
+		esg.caseExpressionSQL(b, e)
+	case exp.Ex:
+		esg.expressionMapSQL(b, e)
+	case exp.ExOr:
+		esg.expressionOrMapSQL(b, e)
+	default:
+		b.SetError(errUnsupportedExpressionType(e))
+	}
+}
+
+// Generates a placeholder (e.g. ?, $1)
+func (esg *expressionSQLGenerator) placeHolderSQL(b sb.SQLBuilder, i interface{}) {
+	b.Write(esg.dialectOptions.PlaceHolderFragment)
+	if esg.dialectOptions.IncludePlaceholderNum {
+		b.WriteStrings(strconv.FormatInt(int64(b.CurrentArgPosition()), 10))
+	}
+	b.WriteArg(i)
+}
+
+// Generates the SQL for a sub-select on a Dataset
+func (esg *expressionSQLGenerator) appendableExpressionSQL(b sb.SQLBuilder, a exp.AppendableExpression) {
+	b.WriteRunes(esg.dialectOptions.LeftParenRune)
+	a.AppendSQL(b)
+	b.WriteRunes(esg.dialectOptions.RightParenRune)
+	if a.GetAs() != nil {
+		b.Write(esg.dialectOptions.AsFragment)
+		esg.Generate(b, a.GetAs())
+	}
+}
+
+// Quotes an identifier (e.g. "col", "table"."col")
+func (esg *expressionSQLGenerator) identifierExpressionSQL(b sb.SQLBuilder, ident exp.IdentifierExpression) {
+	if ident.IsEmpty() {
+		b.SetError(ErrEmptyIdentifier)
+		return
+	}
+	schema, table, col := ident.GetSchema(), ident.GetTable(), ident.GetCol()
+	if schema != esg.dialectOptions.EmptyString {
+		b.WriteRunes(esg.dialectOptions.QuoteRune).
+			WriteStrings(schema).
+			WriteRunes(esg.dialectOptions.QuoteRune)
+	}
+	if table != esg.dialectOptions.EmptyString {
+		if schema != esg.dialectOptions.EmptyString {
+			b.WriteRunes(esg.dialectOptions.PeriodRune)
+		}
+		b.WriteRunes(esg.dialectOptions.QuoteRune).
+			WriteStrings(table).
+			WriteRunes(esg.dialectOptions.QuoteRune)
+	}
+	switch t := col.(type) {
+	case nil:
+	case string:
+		if col != esg.dialectOptions.EmptyString {
+			if table != esg.dialectOptions.EmptyString || schema != esg.dialectOptions.EmptyString {
+				b.WriteRunes(esg.dialectOptions.PeriodRune)
+			}
+			b.WriteRunes(esg.dialectOptions.QuoteRune).
+				WriteStrings(t).
+				WriteRunes(esg.dialectOptions.QuoteRune)
+		}
+	case exp.LiteralExpression:
+		if table != esg.dialectOptions.EmptyString || schema != esg.dialectOptions.EmptyString {
+			b.WriteRunes(esg.dialectOptions.PeriodRune)
+		}
+		esg.Generate(b, t)
+	default:
+		b.SetError(errUnsupportedIdentifierExpression(col))
+	}
+}
+
+func (esg *expressionSQLGenerator) lateralExpressionSQL(b sb.SQLBuilder, le exp.LateralExpression) {
+	if !esg.dialectOptions.SupportsLateral {
+		b.SetError(errLateralNotSupported(esg.dialect))
+		return
+	}
+	b.Write(esg.dialectOptions.LateralFragment)
+	esg.Generate(b, le.Table())
+}
+
+// Generates SQL NULL value
+func (esg *expressionSQLGenerator) literalNil(b sb.SQLBuilder) {
+	if b.IsPrepared() {
+		esg.placeHolderSQL(b, nil)
+		return
+	}
+	b.Write(esg.dialectOptions.Null)
+}
+
+// Generates SQL bool literal, (e.g. TRUE, FALSE, mysql 1, 0, sqlite3 1, 0)
+func (esg *expressionSQLGenerator) literalBool(b sb.SQLBuilder, bl bool) {
+	if b.IsPrepared() {
+		esg.placeHolderSQL(b, bl)
+		return
+	}
+	if bl {
+		b.Write(esg.dialectOptions.True)
+	} else {
+		b.Write(esg.dialectOptions.False)
+	}
+}
+
+// Generates SQL for a time.Time value
+func (esg *expressionSQLGenerator) literalTime(b sb.SQLBuilder, t time.Time) {
+	if b.IsPrepared() {
+		esg.placeHolderSQL(b, t)
+		return
+	}
+	esg.Generate(b, t.In(timeLocation).Format(esg.dialectOptions.TimeFormat))
+}
+
+// Generates SQL for a Float Value
+func (esg *expressionSQLGenerator) literalFloat(b sb.SQLBuilder, f float64) {
+	if b.IsPrepared() {
+		esg.placeHolderSQL(b, f)
+		return
+	}
+	b.WriteStrings(strconv.FormatFloat(f, 'f', -1, 64))
+}
+
+// Generates SQL for an int value
+func (esg *expressionSQLGenerator) literalInt(b sb.SQLBuilder, i int64) {
+	if b.IsPrepared() {
+		esg.placeHolderSQL(b, i)
+		return
+	}
+	b.WriteStrings(strconv.FormatInt(i, 10))
+}
+
+// Generates SQL for a string
+func (esg *expressionSQLGenerator) literalString(b sb.SQLBuilder, s string) {
+	if b.IsPrepared() {
+		esg.placeHolderSQL(b, s)
+		return
+	}
+	b.WriteRunes(esg.dialectOptions.StringQuote)
+	for _, char := range s {
+		if e, ok := esg.dialectOptions.EscapedRunes[char]; ok {
+			b.Write(e)
+		} else {
+			b.WriteRunes(char)
+		}
+	}
+
+	b.WriteRunes(esg.dialectOptions.StringQuote)
+}
+
+// Generates SQL for a slice of bytes
+func (esg *expressionSQLGenerator) literalBytes(b sb.SQLBuilder, bs []byte) {
+	if b.IsPrepared() {
+		esg.placeHolderSQL(b, bs)
+		return
+	}
+	b.WriteRunes(esg.dialectOptions.StringQuote)
+	i := 0
+	for len(bs) > 0 {
+		char, l := utf8.DecodeRune(bs)
+		if e, ok := esg.dialectOptions.EscapedRunes[char]; ok {
+			b.Write(e)
+		} else {
+			b.WriteRunes(char)
+		}
+		i++
+		bs = bs[l:]
+	}
+	b.WriteRunes(esg.dialectOptions.StringQuote)
+}
+
+// Generates SQL for a slice of values (e.g. []int64{1,2,3,4} -> (1,2,3,4))
+func (esg *expressionSQLGenerator) sliceValueSQL(b sb.SQLBuilder, slice reflect.Value) {
+	b.WriteRunes(esg.dialectOptions.LeftParenRune)
+	for i, l := 0, slice.Len(); i < l; i++ {
+		esg.Generate(b, slice.Index(i).Interface())
+		if i < l-1 {
+			b.WriteRunes(esg.dialectOptions.CommaRune, esg.dialectOptions.SpaceRune)
+		}
+	}
+	b.WriteRunes(esg.dialectOptions.RightParenRune)
+}
+
+// Generates SQL for an AliasedExpression (e.g. I("a").As("b") -> "a" AS "b")
+func (esg *expressionSQLGenerator) aliasedExpressionSQL(b sb.SQLBuilder, aliased exp.AliasedExpression) {
+	esg.Generate(b, aliased.Aliased())
+	b.Write(esg.dialectOptions.AsFragment)
+	esg.Generate(b, aliased.GetAs())
+}
+
+// Generates SQL for a BooleanExpression (e.g. I("a").Eq(2) -> "a" = 2)
I("a").Eq(2) -> "a" = 2) +func (esg *expressionSQLGenerator) booleanExpressionSQL(b sb.SQLBuilder, operator exp.BooleanExpression) { + b.WriteRunes(esg.dialectOptions.LeftParenRune) + esg.Generate(b, operator.LHS()) + b.WriteRunes(esg.dialectOptions.SpaceRune) + operatorOp := operator.Op() + if val, ok := esg.dialectOptions.BooleanOperatorLookup[operatorOp]; ok { + b.Write(val) + } else { + b.SetError(errUnsupportedBooleanExpressionOperator(operatorOp)) + return + } + rhs := operator.RHS() + + if (operatorOp == exp.IsOp || operatorOp == exp.IsNotOp) && rhs != nil && !esg.dialectOptions.BooleanDataTypeSupported { + b.SetError(errors.New("boolean data type is not supported by dialect %q", esg.dialect)) + return + } + + if (operatorOp == exp.IsOp || operatorOp == exp.IsNotOp) && esg.dialectOptions.UseLiteralIsBools { + // these values must be interpolated because preparing them generates invalid SQL + switch rhs { + case true: + rhs = TrueLiteral + case false: + rhs = FalseLiteral + case nil: + rhs = exp.NewLiteralExpression(string(esg.dialectOptions.Null)) + } + } + b.WriteRunes(esg.dialectOptions.SpaceRune) + + if (operatorOp == exp.IsOp || operatorOp == exp.IsNotOp) && rhs == nil && !esg.dialectOptions.BooleanDataTypeSupported { + // e.g. for SQL server dialect which does not support "IS @p1" for "IS NULL" + b.Write(esg.dialectOptions.Null) + } else { + esg.Generate(b, rhs) + } + + b.WriteRunes(esg.dialectOptions.RightParenRune) +} + +// Generates SQL for a BitwiseExpresion (e.g. I("a").BitwiseOr(2) - > "a" | 2) +func (esg *expressionSQLGenerator) bitwiseExpressionSQL(b sb.SQLBuilder, operator exp.BitwiseExpression) { + b.WriteRunes(esg.dialectOptions.LeftParenRune) + + if operator.LHS() != nil { + esg.Generate(b, operator.LHS()) + b.WriteRunes(esg.dialectOptions.SpaceRune) + } + + operatorOp := operator.Op() + if val, ok := esg.dialectOptions.BitwiseOperatorLookup[operatorOp]; ok { + b.Write(val) + } else { + b.SetError(errUnsupportedBitwiseExpressionOperator(operatorOp)) + return + } + + b.WriteRunes(esg.dialectOptions.SpaceRune) + esg.Generate(b, operator.RHS()) + b.WriteRunes(esg.dialectOptions.RightParenRune) +} + +// Generates SQL for a RangeExpresion (e.g. I("a").Between(RangeVal{Start:2,End:5}) -> "a" BETWEEN 2 AND 5) +func (esg *expressionSQLGenerator) rangeExpressionSQL(b sb.SQLBuilder, operator exp.RangeExpression) { + b.WriteRunes(esg.dialectOptions.LeftParenRune) + esg.Generate(b, operator.LHS()) + b.WriteRunes(esg.dialectOptions.SpaceRune) + operatorOp := operator.Op() + if val, ok := esg.dialectOptions.RangeOperatorLookup[operatorOp]; ok { + b.Write(val) + } else { + b.SetError(errUnsupportedRangeExpressionOperator(operatorOp)) + return + } + rhs := operator.RHS() + b.WriteRunes(esg.dialectOptions.SpaceRune) + esg.Generate(b, rhs.Start()) + b.Write(esg.dialectOptions.AndFragment) + esg.Generate(b, rhs.End()) + b.WriteRunes(esg.dialectOptions.RightParenRune) +} + +// Generates SQL for an OrderedExpression (e.g. I("a").Asc() -> "a" ASC) +func (esg *expressionSQLGenerator) orderedExpressionSQL(b sb.SQLBuilder, order exp.OrderedExpression) { + esg.Generate(b, order.SortExpression()) + if order.IsAsc() { + b.Write(esg.dialectOptions.AscFragment) + } else { + b.Write(esg.dialectOptions.DescFragment) + } + switch order.NullSortType() { + case exp.NoNullsSortType: + return + case exp.NullsFirstSortType: + b.Write(esg.dialectOptions.NullsFirstFragment) + case exp.NullsLastSortType: + b.Write(esg.dialectOptions.NullsLastFragment) + } +} + +// Generates SQL for an ExpressionList (e.g. 
And(I("a").Eq("a"), I("b").Eq("b")) -> (("a" = 'a') AND ("b" = 'b'))) +func (esg *expressionSQLGenerator) expressionListSQL(b sb.SQLBuilder, expressionList exp.ExpressionList) { + if expressionList.IsEmpty() { + return + } + var op []byte + if expressionList.Type() == exp.AndType { + op = esg.dialectOptions.AndFragment + } else { + op = esg.dialectOptions.OrFragment + } + exps := expressionList.Expressions() + expLen := len(exps) - 1 + if expLen > 0 { + b.WriteRunes(esg.dialectOptions.LeftParenRune) + } else { + esg.Generate(b, exps[0]) + return + } + for i, e := range exps { + esg.Generate(b, e) + if i < expLen { + b.Write(op) + } + } + b.WriteRunes(esg.dialectOptions.RightParenRune) +} + +// Generates SQL for a ColumnListExpression +func (esg *expressionSQLGenerator) columnListSQL(b sb.SQLBuilder, columnList exp.ColumnListExpression) { + cols := columnList.Columns() + colLen := len(cols) + for i, col := range cols { + esg.Generate(b, col) + if i < colLen-1 { + b.WriteRunes(esg.dialectOptions.CommaRune, esg.dialectOptions.SpaceRune) + } + } +} + +// Generates SQL for an UpdateEpxresion +func (esg *expressionSQLGenerator) updateExpressionSQL(b sb.SQLBuilder, update exp.UpdateExpression) { + esg.Generate(b, update.Col()) + b.WriteRunes(esg.dialectOptions.SetOperatorRune) + esg.Generate(b, update.Val()) +} + +// Generates SQL for a LiteralExpression +// L("a + b") -> a + b +// L("a = ?", 1) -> a = 1 +func (esg *expressionSQLGenerator) literalExpressionSQL(b sb.SQLBuilder, literal exp.LiteralExpression) { + l := literal.Literal() + args := literal.Args() + if argsLen := len(args); argsLen > 0 { + currIndex := 0 + for _, char := range l { + if char == replacementRune && currIndex < argsLen { + esg.Generate(b, args[currIndex]) + currIndex++ + } else { + b.WriteRunes(char) + } + } + return + } + b.WriteStrings(l) +} + +// Generates SQL for a SQLFunctionExpression +// COUNT(I("a")) -> COUNT("a") +func (esg *expressionSQLGenerator) sqlFunctionExpressionSQL(b sb.SQLBuilder, sqlFunc exp.SQLFunctionExpression) { + b.WriteStrings(sqlFunc.Name()) + esg.Generate(b, sqlFunc.Args()) +} + +func (esg *expressionSQLGenerator) sqlWindowFunctionExpression(b sb.SQLBuilder, sqlWinFunc exp.SQLWindowFunctionExpression) { + if !esg.dialectOptions.SupportsWindowFunction { + b.SetError(ErrWindowNotSupported(esg.dialect)) + return + } + esg.Generate(b, sqlWinFunc.Func()) + b.Write(esg.dialectOptions.WindowOverFragment) + switch { + case sqlWinFunc.HasWindowName(): + esg.Generate(b, sqlWinFunc.WindowName()) + case sqlWinFunc.HasWindow(): + if sqlWinFunc.Window().HasName() { + b.SetError(ErrUnexpectedNamedWindow) + return + } + esg.Generate(b, sqlWinFunc.Window()) + default: + esg.Generate(b, exp.NewWindowExpression(nil, nil, nil, nil)) + } +} + +func (esg *expressionSQLGenerator) windowExpressionSQL(b sb.SQLBuilder, we exp.WindowExpression) { + if !esg.dialectOptions.SupportsWindowFunction { + b.SetError(ErrWindowNotSupported(esg.dialect)) + return + } + if we.HasName() { + esg.Generate(b, we.Name()) + b.Write(esg.dialectOptions.AsFragment) + } + b.WriteRunes(esg.dialectOptions.LeftParenRune) + + hasPartition := we.HasPartitionBy() + hasOrder := we.HasOrder() + + if we.HasParent() { + esg.Generate(b, we.Parent()) + if hasPartition || hasOrder { + b.WriteRunes(esg.dialectOptions.SpaceRune) + } + } + + if hasPartition { + b.Write(esg.dialectOptions.WindowPartitionByFragment) + esg.Generate(b, we.PartitionCols()) + if hasOrder { + b.WriteRunes(esg.dialectOptions.SpaceRune) + } + } + if hasOrder { + 
b.Write(esg.dialectOptions.WindowOrderByFragment) + esg.Generate(b, we.OrderCols()) + } + + b.WriteRunes(esg.dialectOptions.RightParenRune) +} + +// Generates SQL for a CastExpression +// I("a").Cast("NUMERIC") -> CAST("a" AS NUMERIC) +func (esg *expressionSQLGenerator) castExpressionSQL(b sb.SQLBuilder, cast exp.CastExpression) { + b.Write(esg.dialectOptions.CastFragment).WriteRunes(esg.dialectOptions.LeftParenRune) + esg.Generate(b, cast.Casted()) + b.Write(esg.dialectOptions.AsFragment) + esg.Generate(b, cast.Type()) + b.WriteRunes(esg.dialectOptions.RightParenRune) +} + +// Generates the sql for the WITH clauses for common table expressions (CTE) +func (esg *expressionSQLGenerator) commonTablesSliceSQL(b sb.SQLBuilder, ctes []exp.CommonTableExpression) { + l := len(ctes) + if l == 0 { + return + } + if !esg.dialectOptions.SupportsWithCTE { + b.SetError(ErrCTENotSupported(esg.dialect)) + return + } + b.Write(esg.dialectOptions.WithFragment) + anyRecursive := false + for _, cte := range ctes { + anyRecursive = anyRecursive || cte.IsRecursive() + } + if anyRecursive { + if !esg.dialectOptions.SupportsWithCTERecursive { + b.SetError(ErrRecursiveCTENotSupported(esg.dialect)) + return + } + b.Write(esg.dialectOptions.RecursiveFragment) + } + for i, cte := range ctes { + esg.Generate(b, cte) + if i < l-1 { + b.WriteRunes(esg.dialectOptions.CommaRune, esg.dialectOptions.SpaceRune) + } + } + b.WriteRunes(esg.dialectOptions.SpaceRune) +} + +// Generates SQL for a CommonTableExpression +func (esg *expressionSQLGenerator) commonTableExpressionSQL(b sb.SQLBuilder, cte exp.CommonTableExpression) { + esg.Generate(b, cte.Name()) + b.Write(esg.dialectOptions.AsFragment) + esg.Generate(b, cte.SubQuery()) +} + +// Generates SQL for a CompoundExpression +func (esg *expressionSQLGenerator) compoundExpressionSQL(b sb.SQLBuilder, compound exp.CompoundExpression) { + switch compound.Type() { + case exp.UnionCompoundType: + b.Write(esg.dialectOptions.UnionFragment) + case exp.UnionAllCompoundType: + b.Write(esg.dialectOptions.UnionAllFragment) + case exp.IntersectCompoundType: + b.Write(esg.dialectOptions.IntersectFragment) + case exp.IntersectAllCompoundType: + b.Write(esg.dialectOptions.IntersectAllFragment) + } + if esg.dialectOptions.WrapCompoundsInParens { + b.WriteRunes(esg.dialectOptions.LeftParenRune) + compound.RHS().AppendSQL(b) + b.WriteRunes(esg.dialectOptions.RightParenRune) + } else { + compound.RHS().AppendSQL(b) + } +} + +// Generates SQL for a CaseExpression +func (esg *expressionSQLGenerator) caseExpressionSQL(b sb.SQLBuilder, caseExpression exp.CaseExpression) { + caseVal := caseExpression.GetValue() + whens := caseExpression.GetWhens() + elseResult := caseExpression.GetElse() + + if len(whens) == 0 { + b.SetError(ErrEmptyCaseWhens) + return + } + b.Write(esg.dialectOptions.CaseFragment) + if caseVal != nil { + esg.Generate(b, caseVal) + } + for _, when := range whens { + b.Write(esg.dialectOptions.WhenFragment) + esg.Generate(b, when.Condition()) + b.Write(esg.dialectOptions.ThenFragment) + esg.Generate(b, when.Result()) + } + if elseResult != nil { + b.Write(esg.dialectOptions.ElseFragment) + esg.Generate(b, elseResult.Result()) + } + b.Write(esg.dialectOptions.EndFragment) +} + +func (esg *expressionSQLGenerator) expressionMapSQL(b sb.SQLBuilder, ex exp.Ex) { + expressionList, err := ex.ToExpressions() + if err != nil { + b.SetError(err) + return + } + esg.Generate(b, expressionList) +} + +func (esg *expressionSQLGenerator) expressionOrMapSQL(b sb.SQLBuilder, ex exp.ExOr) { + 
expressionList, err := ex.ToExpressions()
+	if err != nil {
+		b.SetError(err)
+		return
+	}
+	esg.Generate(b, expressionList)
+}
diff --git a/vendor/github.com/doug-martin/goqu/v9/sqlgen/insert_sql_generator.go b/vendor/github.com/doug-martin/goqu/v9/sqlgen/insert_sql_generator.go
new file mode 100644
index 000000000..1c6105b93
--- /dev/null
+++ b/vendor/github.com/doug-martin/goqu/v9/sqlgen/insert_sql_generator.go
@@ -0,0 +1,203 @@
+package sqlgen
+
+import (
+	"strings"
+
+	"github.com/doug-martin/goqu/v9/exp"
+	"github.com/doug-martin/goqu/v9/internal/errors"
+	"github.com/doug-martin/goqu/v9/internal/sb"
+)
+
+type (
+	// An adapter interface to be used by a Dataset to generate SQL for a specific dialect.
+	// See DefaultAdapter for a concrete implementation and examples.
+	InsertSQLGenerator interface {
+		Dialect() string
+		Generate(b sb.SQLBuilder, clauses exp.InsertClauses)
+	}
+	// The default adapter. This class should be used when building a new adapter. When creating a new adapter you can
+	// either override methods, or more typically update default values.
+	// See (github.com/doug-martin/goqu/dialect/postgres)
+	insertSQLGenerator struct {
+		CommonSQLGenerator
+	}
+)
+
+var (
+	ErrConflictUpdateValuesRequired = errors.New("values are required for on conflict update expression")
+	ErrNoSourceForInsert            = errors.New("no source found when generating insert sql")
+)
+
+func errMisMatchedRowLength(expectedL, actualL int) error {
+	return errors.New("rows with different value length expected %d got %d", expectedL, actualL)
+}
+
+func errUpsertWithWhereNotSupported(dialect string) error {
+	return errors.New("dialect does not support upsert with where clause [dialect=%s]", dialect)
+}
+
+func NewInsertSQLGenerator(dialect string, do *SQLDialectOptions) InsertSQLGenerator {
+	return &insertSQLGenerator{NewCommonSQLGenerator(dialect, do)}
+}
+
+func (isg *insertSQLGenerator) Generate(
+	b sb.SQLBuilder,
+	clauses exp.InsertClauses,
+) {
+	if !clauses.HasInto() {
+		b.SetError(ErrNoSourceForInsert)
+		return
+	}
+	for _, f := range isg.DialectOptions().InsertSQLOrder {
+		if b.Error() != nil {
+			return
+		}
+		switch f {
+		case CommonTableSQLFragment:
+			isg.ExpressionSQLGenerator().Generate(b, clauses.CommonTables())
+		case InsertBeingSQLFragment:
+			isg.InsertBeginSQL(b, clauses.OnConflict())
+		case IntoSQLFragment:
+			b.WriteRunes(isg.DialectOptions().SpaceRune)
+			isg.ExpressionSQLGenerator().Generate(b, clauses.Into())
+		case InsertSQLFragment:
+			isg.InsertSQL(b, clauses)
+		case ReturningSQLFragment:
+			isg.ReturningSQL(b, clauses.Returning())
+		default:
+			b.SetError(ErrNotSupportedFragment("INSERT", f))
+		}
+	}
+}
+
+// Adds the correct fragment to begin an INSERT statement
+func (isg *insertSQLGenerator) InsertBeginSQL(b sb.SQLBuilder, o exp.ConflictExpression) {
+	if isg.DialectOptions().SupportsInsertIgnoreSyntax && o != nil {
+		b.Write(isg.DialectOptions().InsertIgnoreClause)
+	} else {
+		b.Write(isg.DialectOptions().InsertClause)
+	}
+}
+
+// Adds the columns and values (or other source) to an insert statement
+func (isg *insertSQLGenerator) InsertSQL(b sb.SQLBuilder, ic exp.InsertClauses) {
+	switch {
+	case ic.HasRows():
+		ie, err := exp.NewInsertExpression(ic.Rows()...)
+		if err != nil {
+			b.SetError(err)
+			return
+		}
+		isg.InsertExpressionSQL(b, ie)
+	case ic.HasCols() && ic.HasVals():
+		isg.insertColumnsSQL(b, ic.Cols())
+		isg.insertValuesSQL(b, ic.Vals())
+	case ic.HasCols() && ic.HasFrom():
+		isg.insertColumnsSQL(b, ic.Cols())
+		isg.insertFromSQL(b, ic.From())
+	case ic.HasFrom():
+		isg.insertFromSQL(b, ic.From())
+	default:
+		isg.defaultValuesSQL(b)
+	}
+	if ic.HasAlias() {
+		b.Write(isg.DialectOptions().AsFragment)
+		isg.ExpressionSQLGenerator().Generate(b, ic.Alias())
+	}
+	isg.onConflictSQL(b, ic.OnConflict())
+}
+
+func (isg *insertSQLGenerator) InsertExpressionSQL(b sb.SQLBuilder, ie exp.InsertExpression) {
+	switch {
+	case ie.IsInsertFrom():
+		isg.insertFromSQL(b, ie.From())
+	case ie.IsEmpty():
+		isg.defaultValuesSQL(b)
+	default:
+		isg.insertColumnsSQL(b, ie.Cols())
+		isg.insertValuesSQL(b, ie.Vals())
+	}
+}
+
+// Adds the DefaultValuesFragment to an SQL statement
+func (isg *insertSQLGenerator) defaultValuesSQL(b sb.SQLBuilder) {
+	b.Write(isg.DialectOptions().DefaultValuesFragment)
+}
+
+func (isg *insertSQLGenerator) insertFromSQL(b sb.SQLBuilder, ae exp.AppendableExpression) {
+	b.WriteRunes(isg.DialectOptions().SpaceRune)
+	ae.AppendSQL(b)
+}
+
+// Adds the columns list to an insert statement
+func (isg *insertSQLGenerator) insertColumnsSQL(b sb.SQLBuilder, cols exp.ColumnListExpression) {
+	b.WriteRunes(isg.DialectOptions().SpaceRune, isg.DialectOptions().LeftParenRune)
+	isg.ExpressionSQLGenerator().Generate(b, cols)
+	b.WriteRunes(isg.DialectOptions().RightParenRune)
+}
+
+// Adds the values clause to an SQL statement
+func (isg *insertSQLGenerator) insertValuesSQL(b sb.SQLBuilder, values [][]interface{}) {
+	b.Write(isg.DialectOptions().ValuesFragment)
+	rowLen := len(values[0])
+	valueLen := len(values)
+	for i, row := range values {
+		if len(row) != rowLen {
+			b.SetError(errMisMatchedRowLength(rowLen, len(row)))
+			return
+		}
+		isg.ExpressionSQLGenerator().Generate(b, row)
+		if i < valueLen-1 {
+			b.WriteRunes(isg.DialectOptions().CommaRune, isg.DialectOptions().SpaceRune)
+		}
+	}
+}
+
+// Adds the ON CONFLICT clause to an SQL statement
+func (isg *insertSQLGenerator) onConflictSQL(b sb.SQLBuilder, o exp.ConflictExpression) {
+	if o == nil {
+		return
+	}
+	b.Write(isg.DialectOptions().ConflictFragment)
+	switch t := o.(type) {
+	case exp.ConflictUpdateExpression:
+		target := t.TargetColumn()
+		if isg.DialectOptions().SupportsConflictTarget && target != "" {
+			wrapParens := !strings.HasPrefix(strings.ToLower(target), "on constraint")
+
+			b.WriteRunes(isg.DialectOptions().SpaceRune)
+			if wrapParens {
+				b.WriteRunes(isg.DialectOptions().LeftParenRune).
+					WriteStrings(target).
+					WriteRunes(isg.DialectOptions().RightParenRune)
+			} else {
+				b.Write([]byte(target))
+			}
+		}
+		isg.onConflictDoUpdateSQL(b, t)
+	default:
+		b.Write(isg.DialectOptions().ConflictDoNothingFragment)
+	}
+}
+
+func (isg *insertSQLGenerator) onConflictDoUpdateSQL(b sb.SQLBuilder, o exp.ConflictUpdateExpression) {
+	b.Write(isg.DialectOptions().ConflictDoUpdateFragment)
+	update := o.Update()
+	if update == nil {
+		b.SetError(ErrConflictUpdateValuesRequired)
+		return
+	}
+	ue, err := exp.NewUpdateExpressions(update)
+	if err != nil {
+		b.SetError(err)
+		return
+	}
+	isg.UpdateExpressionSQL(b, ue...)
+ if b.Error() == nil && o.WhereClause() != nil { + if !isg.DialectOptions().SupportsConflictUpdateWhere { + b.SetError(errUpsertWithWhereNotSupported(isg.Dialect())) + return + } + isg.WhereSQL(b, o.WhereClause()) + } +} diff --git a/vendor/github.com/doug-martin/goqu/v9/sqlgen/select_sql_generator.go b/vendor/github.com/doug-martin/goqu/v9/sqlgen/select_sql_generator.go new file mode 100644 index 000000000..de322910e --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/sqlgen/select_sql_generator.go @@ -0,0 +1,266 @@ +package sqlgen + +import ( + "github.com/doug-martin/goqu/v9/exp" + "github.com/doug-martin/goqu/v9/internal/errors" + "github.com/doug-martin/goqu/v9/internal/sb" +) + +type ( + // An adapter interface to be used by a Dataset to generate SQL for a specific dialect. + // See DefaultAdapter for a concrete implementation and examples. + SelectSQLGenerator interface { + Dialect() string + Generate(b sb.SQLBuilder, clauses exp.SelectClauses) + } + // The default adapter. This class should be used when building a new adapter. When creating a new adapter you can + // either override methods, or more typically update default values. + // See (github.com/doug-martin/goqu/dialect/postgres) + selectSQLGenerator struct { + CommonSQLGenerator + } +) + +func ErrNotSupportedJoinType(j exp.JoinExpression) error { + return errors.New("dialect does not support %v", j.JoinType()) +} + +func ErrJoinConditionRequired(j exp.JoinExpression) error { + return errors.New("join condition required for conditioned join %v", j.JoinType()) +} + +func ErrDistinctOnNotSupported(dialect string) error { + return errors.New("dialect does not support DISTINCT ON clause [dialect=%s]", dialect) +} + +func ErrWindowNotSupported(dialect string) error { + return errors.New("dialect does not support WINDOW clause [dialect=%s]", dialect) +} + +var ErrNoWindowName = errors.New("window expresion has no valid name") + +func NewSelectSQLGenerator(dialect string, do *SQLDialectOptions) SelectSQLGenerator { + return &selectSQLGenerator{NewCommonSQLGenerator(dialect, do)} +} + +func (ssg *selectSQLGenerator) Generate(b sb.SQLBuilder, clauses exp.SelectClauses) { + for _, f := range ssg.DialectOptions().SelectSQLOrder { + if b.Error() != nil { + return + } + switch f { + case CommonTableSQLFragment: + ssg.ExpressionSQLGenerator().Generate(b, clauses.CommonTables()) + case SelectSQLFragment: + ssg.SelectSQL(b, clauses) + case SelectWithLimitSQLFragment: + ssg.SelectWithLimitSQL(b, clauses) + case FromSQLFragment: + ssg.FromSQL(b, clauses.From()) + case JoinSQLFragment: + ssg.JoinSQL(b, clauses.Joins()) + case WhereSQLFragment: + ssg.WhereSQL(b, clauses.Where()) + case GroupBySQLFragment: + ssg.GroupBySQL(b, clauses.GroupBy()) + case HavingSQLFragment: + ssg.HavingSQL(b, clauses.Having()) + case WindowSQLFragment: + ssg.WindowSQL(b, clauses.Windows()) + case CompoundsSQLFragment: + ssg.CompoundsSQL(b, clauses.Compounds()) + case OrderSQLFragment: + ssg.OrderSQL(b, clauses.Order()) + case OrderWithOffsetFetchSQLFragment: + ssg.OrderWithOffsetFetchSQL(b, clauses.Order(), clauses.Offset(), clauses.Limit()) + case LimitSQLFragment: + ssg.LimitSQL(b, clauses.Limit()) + case OffsetSQLFragment: + ssg.OffsetSQL(b, clauses.Offset()) + case ForSQLFragment: + ssg.ForSQL(b, clauses.Lock()) + default: + b.SetError(ErrNotSupportedFragment("SELECT", f)) + } + } +} + +func (ssg *selectSQLGenerator) selectSQLCommon(b sb.SQLBuilder, clauses exp.SelectClauses) { + dc := clauses.Distinct() + if dc != nil { + 
b.Write(ssg.DialectOptions().DistinctFragment) + if !dc.IsEmpty() { + if ssg.DialectOptions().SupportsDistinctOn { + b.Write(ssg.DialectOptions().OnFragment).WriteRunes(ssg.DialectOptions().LeftParenRune) + ssg.ExpressionSQLGenerator().Generate(b, dc) + b.WriteRunes(ssg.DialectOptions().RightParenRune, ssg.DialectOptions().SpaceRune) + } else { + b.SetError(ErrDistinctOnNotSupported(ssg.Dialect())) + return + } + } else { + b.WriteRunes(ssg.DialectOptions().SpaceRune) + } + } + + if cols := clauses.Select(); clauses.IsDefaultSelect() || len(cols.Columns()) == 0 { + b.WriteRunes(ssg.DialectOptions().StarRune) + } else { + ssg.ExpressionSQLGenerator().Generate(b, cols) + } +} + +// Adds the SELECT clause and columns to a sql statement +func (ssg *selectSQLGenerator) SelectSQL(b sb.SQLBuilder, clauses exp.SelectClauses) { + b.Write(ssg.DialectOptions().SelectClause).WriteRunes(ssg.DialectOptions().SpaceRune) + ssg.selectSQLCommon(b, clauses) +} + +// Adds the SELECT clause along with LIMIT to a SQL statement (e.g. MSSQL dialect: SELECT TOP 10 ...) +func (ssg *selectSQLGenerator) SelectWithLimitSQL(b sb.SQLBuilder, clauses exp.SelectClauses) { + b.Write(ssg.DialectOptions().SelectClause).WriteRunes(ssg.DialectOptions().SpaceRune) + if clauses.Offset() == 0 && clauses.Limit() != nil { + ssg.LimitSQL(b, clauses.Limit()) + b.WriteRunes(ssg.DialectOptions().SpaceRune) + } + ssg.selectSQLCommon(b, clauses) +} + +// Generates the JOIN clauses for an SQL statement +func (ssg *selectSQLGenerator) JoinSQL(b sb.SQLBuilder, joins exp.JoinExpressions) { + if len(joins) > 0 { + for _, j := range joins { + joinType, ok := ssg.DialectOptions().JoinTypeLookup[j.JoinType()] + if !ok { + b.SetError(ErrNotSupportedJoinType(j)) + return + } + b.Write(joinType) + ssg.ExpressionSQLGenerator().Generate(b, j.Table()) + if t, ok := j.(exp.ConditionedJoinExpression); ok { + if t.IsConditionEmpty() { + b.SetError(ErrJoinConditionRequired(j)) + return + } + ssg.joinConditionSQL(b, t.Condition()) + } + } + } +} + +// Generates the GROUP BY clause for an SQL statement +func (ssg *selectSQLGenerator) GroupBySQL(b sb.SQLBuilder, groupBy exp.ColumnListExpression) { + if groupBy != nil && len(groupBy.Columns()) > 0 { + b.Write(ssg.DialectOptions().GroupByFragment) + ssg.ExpressionSQLGenerator().Generate(b, groupBy) + } +} + +// Generates the HAVING clause for an SQL statement +func (ssg *selectSQLGenerator) HavingSQL(b sb.SQLBuilder, having exp.ExpressionList) { + if having != nil && len(having.Expressions()) > 0 { + b.Write(ssg.DialectOptions().HavingFragment) + ssg.ExpressionSQLGenerator().Generate(b, having) + } +} + +// Generates the OFFSET clause for an SQL statement +func (ssg *selectSQLGenerator) OffsetSQL(b sb.SQLBuilder, offset uint) { + if offset > 0 { + b.Write(ssg.DialectOptions().OffsetFragment) + ssg.ExpressionSQLGenerator().Generate(b, offset) + } +} + +// Generates the compound sql clause for an SQL statement (e.g. 
UNION, INTERSECT) +func (ssg *selectSQLGenerator) CompoundsSQL(b sb.SQLBuilder, compounds []exp.CompoundExpression) { + for _, compound := range compounds { + ssg.ExpressionSQLGenerator().Generate(b, compound) + } +} + +// Generates the FOR (aka "locking") clause for an SQL statement +func (ssg *selectSQLGenerator) ForSQL(b sb.SQLBuilder, lockingClause exp.Lock) { + if lockingClause == nil { + return + } + switch lockingClause.Strength() { + case exp.ForNolock: + return + case exp.ForUpdate: + b.Write(ssg.DialectOptions().ForUpdateFragment) + case exp.ForNoKeyUpdate: + b.Write(ssg.DialectOptions().ForNoKeyUpdateFragment) + case exp.ForShare: + b.Write(ssg.DialectOptions().ForShareFragment) + case exp.ForKeyShare: + b.Write(ssg.DialectOptions().ForKeyShareFragment) + } + + of := lockingClause.Of() + if ofLen := len(of); ofLen > 0 { + if ofFragment := ssg.DialectOptions().OfFragment; len(ofFragment) > 0 { + b.Write(ofFragment) + for i, table := range of { + ssg.ExpressionSQLGenerator().Generate(b, table) + if i < ofLen-1 { + b.WriteRunes(ssg.DialectOptions().CommaRune, ssg.DialectOptions().SpaceRune) + } + } + b.WriteRunes(ssg.DialectOptions().SpaceRune) + } + } + + // the WAIT case is the default in Postgres, and is what you get if you don't specify NOWAIT or + // SKIP LOCKED. There's no special syntax for it in PG, so we don't do anything for it here + switch lockingClause.WaitOption() { + case exp.Wait: + return + case exp.NoWait: + b.Write(ssg.DialectOptions().NowaitFragment) + case exp.SkipLocked: + b.Write(ssg.DialectOptions().SkipLockedFragment) + } +} + +func (ssg *selectSQLGenerator) WindowSQL(b sb.SQLBuilder, windows []exp.WindowExpression) { + weLen := len(windows) + if weLen == 0 { + return + } + if !ssg.DialectOptions().SupportsWindowFunction { + b.SetError(ErrWindowNotSupported(ssg.Dialect())) + return + } + b.Write(ssg.DialectOptions().WindowFragment) + for i, we := range windows { + if !we.HasName() { + b.SetError(ErrNoWindowName) + } + ssg.ExpressionSQLGenerator().Generate(b, we) + if i < weLen-1 { + b.WriteRunes(ssg.DialectOptions().CommaRune, ssg.DialectOptions().SpaceRune) + } + } +} + +func (ssg *selectSQLGenerator) joinConditionSQL(b sb.SQLBuilder, jc exp.JoinCondition) { + switch t := jc.(type) { + case exp.JoinOnCondition: + ssg.joinOnConditionSQL(b, t) + case exp.JoinUsingCondition: + ssg.joinUsingConditionSQL(b, t) + } +} + +func (ssg *selectSQLGenerator) joinUsingConditionSQL(b sb.SQLBuilder, jc exp.JoinUsingCondition) { + b.Write(ssg.DialectOptions().UsingFragment). 
+ WriteRunes(ssg.DialectOptions().LeftParenRune) + ssg.ExpressionSQLGenerator().Generate(b, jc.Using()) + b.WriteRunes(ssg.DialectOptions().RightParenRune) +} + +func (ssg *selectSQLGenerator) joinOnConditionSQL(b sb.SQLBuilder, jc exp.JoinOnCondition) { + b.Write(ssg.DialectOptions().OnFragment) + ssg.ExpressionSQLGenerator().Generate(b, jc.On()) +} diff --git a/vendor/github.com/doug-martin/goqu/v9/sqlgen/sql_dialect_options.go b/vendor/github.com/doug-martin/goqu/v9/sqlgen/sql_dialect_options.go new file mode 100644 index 000000000..a0df394e4 --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/sqlgen/sql_dialect_options.go @@ -0,0 +1,607 @@ +package sqlgen + +import ( + "fmt" + "time" + + "github.com/doug-martin/goqu/v9/exp" +) + +type ( + SQLFragmentType int + SQLDialectOptions struct { + // Set to true if the dialect supports ORDER BY expressions in DELETE statements (DEFAULT=false) + SupportsOrderByOnDelete bool + // Set to true if the dialect supports table hint for DELETE statements (DELETE t FROM t ...), DEFAULT=false + SupportsDeleteTableHint bool + // Set to true if the dialect supports ORDER BY expressions in UPDATE statements (DEFAULT=false) + SupportsOrderByOnUpdate bool + // Set to true if the dialect supports LIMIT expressions in DELETE statements (DEFAULT=false) + SupportsLimitOnDelete bool + // Set to true if the dialect supports LIMIT expressions in UPDATE statements (DEFAULT=false) + SupportsLimitOnUpdate bool + // Set to true if the dialect supports RETURN expressions (DEFAULT=true) + SupportsReturn bool + // Set to true if the dialect supports Conflict Target (DEFAULT=true) + SupportsConflictTarget bool + // Set to true if the dialect supports Conflict Target (DEFAULT=true) + SupportsConflictUpdateWhere bool + // Set to true if the dialect supports Insert Ignore syntax (DEFAULT=false) + SupportsInsertIgnoreSyntax bool + // Set to true if the dialect supports Common Table Expressions (DEFAULT=true) + SupportsWithCTE bool + // Set to true if the dialect supports recursive Common Table Expressions (DEFAULT=true) + SupportsWithCTERecursive bool + // Set to true if multiple tables are supported in UPDATE statement. (DEFAULT=true) + SupportsMultipleUpdateTables bool + // Set to true if DISTINCT ON is supported (DEFAULT=true) + SupportsDistinctOn bool + // Set to true if LATERAL queries are supported (DEFAULT=true) + SupportsLateral bool + // Set to false if the dialect does not require expressions to be wrapped in parens (DEFAULT=true) + WrapCompoundsInParens bool + + // Set to true if window function are supported in SELECT statement. (DEFAULT=true) + SupportsWindowFunction bool + + // Set to true if the dialect requires join tables in UPDATE to be in a FROM clause (DEFAULT=true). + UseFromClauseForMultipleUpdateTables bool + + // Surround LIMIT parameter with parentheses, like in MSSQL: SELECT TOP (10) ... + SurroundLimitWithParentheses bool + + // The UPDATE fragment to use when generating sql. (DEFAULT=[]byte("UPDATE")) + UpdateClause []byte + // The INSERT fragment to use when generating sql. (DEFAULT=[]byte("INSERT INTO")) + InsertClause []byte + // The INSERT IGNORE INTO fragment to use when generating sql. (DEFAULT=[]byte("INSERT IGNORE INTO")) + InsertIgnoreClause []byte + // The SELECT fragment to use when generating sql. (DEFAULT=[]byte("SELECT")) + SelectClause []byte + // The DELETE fragment to use when generating sql. (DEFAULT=[]byte("DELETE")) + DeleteClause []byte + // The TRUNCATE fragment to use when generating sql. 
(DEFAULT=[]byte("TRUNCATE")) + TruncateClause []byte + // The WITH fragment to use when generating sql. (DEFAULT=[]byte("WITH ")) + WithFragment []byte + // The RECURSIVE fragment to use when generating sql (after WITH). (DEFAULT=[]byte("RECURSIVE ")) + RecursiveFragment []byte + // The CASCADE fragment to use when generating sql. (DEFAULT=[]byte(" CASCADE")) + CascadeFragment []byte + // The RESTRICT fragment to use when generating sql. (DEFAULT=[]byte(" RESTRICT")) + RestrictFragment []byte + // The SQL fragment to use when generating insert sql and using + // DEFAULT VALUES (e.g. postgres="DEFAULT VALUES", mysql="", sqlite3=""). (DEFAULT=[]byte(" DEFAULT VALUES")) + DefaultValuesFragment []byte + // The SQL fragment to use when generating insert sql and listing columns using a VALUES clause + // (DEFAULT=[]byte(" VALUES ")) + ValuesFragment []byte + // The SQL fragment to use when generating truncate sql and using the IDENTITY clause + // (DEFAULT=[]byte(" IDENTITY")) + IdentityFragment []byte + // The SQL fragment to use when generating update sql and using the SET clause (DEFAULT=[]byte(" SET ")) + SetFragment []byte + // The SQL DISTINCT keyword (DEFAULT=[]byte(" DISTINCT ")) + DistinctFragment []byte + // The SQL RETURNING clause (DEFAULT=[]byte(" RETURNING ")) + ReturningFragment []byte + // The SQL FROM clause fragment (DEFAULT=[]byte(" FROM")) + FromFragment []byte + // The SQL USING join clause fragment (DEFAULT=[]byte(" USING ")) + UsingFragment []byte + // The SQL ON join clause fragment (DEFAULT=[]byte(" ON ")) + OnFragment []byte + // The SQL WHERE clause fragment (DEFAULT=[]byte(" WHERE ")) + WhereFragment []byte + // The SQL GROUP BY clause fragment(DEFAULT=[]byte(" GROUP BY ")) + GroupByFragment []byte + // The SQL HAVING clause fragment(DEFAULT=[]byte(" HAVING ")) + HavingFragment []byte + // The SQL WINDOW clause fragment(DEFAULT=[]byte(" WINDOW ")) + WindowFragment []byte + // The SQL WINDOW clause PARTITION BY fragment(DEFAULT=[]byte("PARTITION BY ")) + WindowPartitionByFragment []byte + // The SQL WINDOW clause ORDER BY fragment(DEFAULT=[]byte("ORDER BY ")) + WindowOrderByFragment []byte + // The SQL WINDOW clause OVER fragment(DEFAULT=[]byte(" OVER ")) + WindowOverFragment []byte + // The SQL ORDER BY clause fragment(DEFAULT=[]byte(" ORDER BY ")) + OrderByFragment []byte + // The SQL FETCH fragment(DEFAULT=[]byte(" ")) + FetchFragment []byte + // The SQL LIMIT BY clause fragment(DEFAULT=[]byte(" LIMIT ")) + LimitFragment []byte + // The SQL OFFSET BY clause fragment(DEFAULT=[]byte(" OFFSET ")) + OffsetFragment []byte + // The SQL FOR UPDATE fragment(DEFAULT=[]byte(" FOR UPDATE ")) + ForUpdateFragment []byte + // The SQL FOR NO KEY UPDATE fragment(DEFAULT=[]byte(" FOR NO KEY UPDATE ")) + ForNoKeyUpdateFragment []byte + // The SQL FOR SHARE fragment(DEFAULT=[]byte(" FOR SHARE ")) + ForShareFragment []byte + // The SQL OF fragment(DEFAULT=[]byte("OF ")) + OfFragment []byte + // The SQL FOR KEY SHARE fragment(DEFAULT=[]byte(" FOR KEY SHARE ")) + ForKeyShareFragment []byte + // The SQL NOWAIT fragment(DEFAULT=[]byte("NOWAIT")) + NowaitFragment []byte + // The SQL SKIP LOCKED fragment(DEFAULT=[]byte("SKIP LOCKED")) + SkipLockedFragment []byte + // The SQL AS fragment when aliasing an Expression(DEFAULT=[]byte(" AS ")) + AsFragment []byte + // The SQL LATERAL fragment used for LATERAL joins + LateralFragment []byte + // The quote rune to use when quoting identifiers(DEFAULT='"') + QuoteRune rune + // The NULL literal to use when interpolating nulls values 
(DEFAULT=[]byte("NULL")) + Null []byte + // The TRUE literal to use when interpolating bool true values (DEFAULT=[]byte("TRUE")) + True []byte + // The FALSE literal to use when interpolating bool false values (DEFAULT=[]byte("FALSE")) + False []byte + // The ASC fragment when specifying column order (DEFAULT=[]byte(" ASC")) + AscFragment []byte + // The DESC fragment when specifying column order (DEFAULT=[]byte(" DESC")) + DescFragment []byte + // The NULLS FIRST fragment when specifying column order (DEFAULT=[]byte(" NULLS FIRST")) + NullsFirstFragment []byte + // The NULLS LAST fragment when specifying column order (DEFAULT=[]byte(" NULLS LAST")) + NullsLastFragment []byte + // The AND keyword used when joining ExpressionLists (DEFAULT=[]byte(" AND ")) + AndFragment []byte + // The OR keyword used when joining ExpressionLists (DEFAULT=[]byte(" OR ")) + OrFragment []byte + // The UNION keyword used when creating compound statements (DEFAULT=[]byte(" UNION ")) + UnionFragment []byte + // The UNION ALL keyword used when creating compound statements (DEFAULT=[]byte(" UNION ALL ")) + UnionAllFragment []byte + // The INTERSECT keyword used when creating compound statements (DEFAULT=[]byte(" INTERSECT ")) + IntersectFragment []byte + // The INTERSECT ALL keyword used when creating compound statements (DEFAULT=[]byte(" INTERSECT ALL ")) + IntersectAllFragment []byte + // The CAST keyword to use when casting a value (DEFAULT=[]byte("CAST")) + CastFragment []byte + // The CASE keyword to use when when creating a CASE statement (DEFAULT=[]byte("CASE ")) + CaseFragment []byte + // The WHEN keyword to use when when creating a CASE statement (DEFAULT=[]byte(" WHEN ")) + WhenFragment []byte + // The THEN keyword to use when when creating a CASE statement (DEFAULT=[]byte(" THEN ")) + ThenFragment []byte + // The ELSE keyword to use when when creating a CASE statement (DEFAULT=[]byte(" ELSE ")) + ElseFragment []byte + // The End keyword to use when when creating a CASE statement (DEFAULT=[]byte(" END")) + EndFragment []byte + // The quote rune to use when quoting string literals (DEFAULT='\'') + StringQuote rune + // The operator to use when setting values in an update statement (DEFAULT='=') + SetOperatorRune rune + // The placeholder fragment to use when generating a non interpolated statement (DEFAULT=[]byte"?") + PlaceHolderFragment []byte + // Empty string (DEFAULT="") + EmptyString string + // Comma rune (DEFAULT=',') + CommaRune rune + // Space rune (DEFAULT=' ') + SpaceRune rune + // Left paren rune (DEFAULT='(') + LeftParenRune rune + // Right paren rune (DEFAULT=')') + RightParenRune rune + // Star rune (DEFAULT='*') + StarRune rune + // Period rune (DEFAULT='.') + PeriodRune rune + // Set to true to include positional argument numbers when creating a prepared statement (Default=false) + IncludePlaceholderNum bool + // The time format to use when serializing time.Time (DEFAULT=time.RFC3339Nano) + TimeFormat string + // A map used to look up BooleanOperations and their SQL equivalents + // (Default= map[exp.BooleanOperation][]byte{ + // exp.EqOp: []byte("="), + // exp.NeqOp: []byte("!="), + // exp.GtOp: []byte(">"), + // exp.GteOp: []byte(">="), + // exp.LtOp: []byte("<"), + // exp.LteOp: []byte("<="), + // exp.InOp: []byte("IN"), + // exp.NotInOp: []byte("NOT IN"), + // exp.IsOp: []byte("IS"), + // exp.IsNotOp: []byte("IS NOT"), + // exp.LikeOp: []byte("LIKE"), + // exp.NotLikeOp: []byte("NOT LIKE"), + // exp.ILikeOp: []byte("ILIKE"), + // exp.NotILikeOp: []byte("NOT ILIKE"), + // 
exp.RegexpLikeOp: []byte("~"), + // exp.RegexpNotLikeOp: []byte("!~"), + // exp.RegexpILikeOp: []byte("~*"), + // exp.RegexpNotILikeOp: []byte("!~*"), + // }) + BooleanOperatorLookup map[exp.BooleanOperation][]byte + // A map used to look up BitwiseOperations and their SQL equivalents + // (Default=map[exp.BitwiseOperation][]byte{ + // exp.BitwiseInversionOp: []byte("~"), + // exp.BitwiseOrOp: []byte("|"), + // exp.BitwiseAndOp: []byte("&"), + // exp.BitwiseXorOp: []byte("#"), + // exp.BitwiseLeftShiftOp: []byte("<<"), + // exp.BitwiseRightShiftOp: []byte(">>"), + // }), + BitwiseOperatorLookup map[exp.BitwiseOperation][]byte + // A map used to look up RangeOperations and their SQL equivalents + // (Default=map[exp.RangeOperation][]byte{ + // exp.BetweenOp: []byte("BETWEEN"), + // exp.NotBetweenOp: []byte("NOT BETWEEN"), + // }) + RangeOperatorLookup map[exp.RangeOperation][]byte + // A map used to look up JoinTypes and their SQL equivalents + // (Default= map[exp.JoinType][]byte{ + // exp.InnerJoinType: []byte(" INNER JOIN "), + // exp.FullOuterJoinType: []byte(" FULL OUTER JOIN "), + // exp.RightOuterJoinType: []byte(" RIGHT OUTER JOIN "), + // exp.LeftOuterJoinType: []byte(" LEFT OUTER JOIN "), + // exp.FullJoinType: []byte(" FULL JOIN "), + // exp.RightJoinType: []byte(" RIGHT JOIN "), + // exp.LeftJoinType: []byte(" LEFT JOIN "), + // exp.NaturalJoinType: []byte(" NATURAL JOIN "), + // exp.NaturalLeftJoinType: []byte(" NATURAL LEFT JOIN "), + // exp.NaturalRightJoinType: []byte(" NATURAL RIGHT JOIN "), + // exp.NaturalFullJoinType: []byte(" NATURAL FULL JOIN "), + // exp.CrossJoinType: []byte(" CROSS JOIN "), + // }) + JoinTypeLookup map[exp.JoinType][]byte + // Whether or not boolean data type is supported + BooleanDataTypeSupported bool + // Whether or not to use literal TRUE or FALSE for IS statements (e.g. IS TRUE or IS 0) + UseLiteralIsBools bool + // EscapedRunes is a map of a rune and the corresponding escape sequence in bytes. Used when escaping text + // types. 
+ // (Default= map[rune][]byte{ + // '\'': []byte("''"), + // }) + EscapedRunes map[rune][]byte + + // The SQL fragment to use for CONFLICT (Default=[]byte(" ON CONFLICT")) + ConflictFragment []byte + // The SQL fragment to use for CONFLICT DO NOTHING (Default=[]byte(" DO NOTHING")) + ConflictDoNothingFragment []byte + // The SQL fragment to use for CONFLICT DO UPDATE (Default=[]byte(" DO UPDATE SET")) + ConflictDoUpdateFragment []byte + + // The order of SQL fragments when creating a SELECT statement + // (Default=[]SQLFragmentType{ + // CommonTableSQLFragment, + // SelectSQLFragment, + // FromSQLFragment, + // JoinSQLFragment, + // WhereSQLFragment, + // GroupBySQLFragment, + // HavingSQLFragment, + // CompoundsSQLFragment, + // OrderSQLFragment, + // LimitSQLFragment, + // OffsetSQLFragment, + // ForSQLFragment, + // }) + SelectSQLOrder []SQLFragmentType + + // The order of SQL fragments when creating an UPDATE statement + // (Default=[]SQLFragmentType{ + // CommonTableSQLFragment, + // UpdateBeginSQLFragment, + // SourcesSQLFragment, + // UpdateSQLFragment, + // WhereSQLFragment, + // OrderSQLFragment, + // LimitSQLFragment, + // ReturningSQLFragment, + // }) + UpdateSQLOrder []SQLFragmentType + + // The order of SQL fragments when creating an INSERT statement + // (Default=[]SQLFragmentType{ + // CommonTableSQLFragment, + // InsertBeingSQLFragment, + // SourcesSQLFragment, + // InsertSQLFragment, + // ReturningSQLFragment, + // }) + InsertSQLOrder []SQLFragmentType + + // The order of SQL fragments when creating a DELETE statement + // (Default=[]SQLFragmentType{ + // CommonTableSQLFragment, + // DeleteBeginSQLFragment, + // FromSQLFragment, + // WhereSQLFragment, + // OrderSQLFragment, + // LimitSQLFragment, + // ReturningSQLFragment, + // }) + DeleteSQLOrder []SQLFragmentType + + // The order of SQL fragments when creating a TRUNCATE statement + // (Default=[]SQLFragmentType{ + // TruncateSQLFragment, + // }) + TruncateSQLOrder []SQLFragmentType + } +) + +const ( + CommonTableSQLFragment = iota + SelectSQLFragment + SelectWithLimitSQLFragment + FromSQLFragment + JoinSQLFragment + WhereSQLFragment + GroupBySQLFragment + HavingSQLFragment + CompoundsSQLFragment + OrderSQLFragment + OrderWithOffsetFetchSQLFragment + LimitSQLFragment + OffsetSQLFragment + ForSQLFragment + UpdateBeginSQLFragment + SourcesSQLFragment + IntoSQLFragment + UpdateSQLFragment + UpdateFromSQLFragment + ReturningSQLFragment + InsertBeingSQLFragment + InsertSQLFragment + DeleteBeginSQLFragment + TruncateSQLFragment + WindowSQLFragment +) + +// nolint:gocyclo // simple type to string conversion +func (sf SQLFragmentType) String() string { + switch sf { + case CommonTableSQLFragment: + return "CommonTableSQLFragment" + case SelectSQLFragment: + return "SelectSQLFragment" + case FromSQLFragment: + return "FromSQLFragment" + case JoinSQLFragment: + return "JoinSQLFragment" + case WhereSQLFragment: + return "WhereSQLFragment" + case GroupBySQLFragment: + return "GroupBySQLFragment" + case HavingSQLFragment: + return "HavingSQLFragment" + case CompoundsSQLFragment: + return "CompoundsSQLFragment" + case OrderSQLFragment: + return "OrderSQLFragment" + case LimitSQLFragment: + return "LimitSQLFragment" + case OffsetSQLFragment: + return "OffsetSQLFragment" + case ForSQLFragment: + return "ForSQLFragment" + case UpdateBeginSQLFragment: + return "UpdateBeginSQLFragment" + case SourcesSQLFragment: + return "SourcesSQLFragment" + case IntoSQLFragment: + return "IntoSQLFragment" + case UpdateSQLFragment: + return 
"UpdateSQLFragment" + case UpdateFromSQLFragment: + return "UpdateFromSQLFragment" + case ReturningSQLFragment: + return "ReturningSQLFragment" + case InsertBeingSQLFragment: + return "InsertBeingSQLFragment" + case DeleteBeginSQLFragment: + return "DeleteBeginSQLFragment" + case TruncateSQLFragment: + return "TruncateSQLFragment" + case WindowSQLFragment: + return "WindowSQLFragment" + } + return fmt.Sprintf("%d", sf) +} + +//nolint:funlen +func DefaultDialectOptions() *SQLDialectOptions { + return &SQLDialectOptions{ + SupportsOrderByOnDelete: false, + SupportsDeleteTableHint: false, + SupportsOrderByOnUpdate: false, + SupportsLimitOnDelete: false, + SupportsLimitOnUpdate: false, + SupportsReturn: true, + SupportsConflictUpdateWhere: true, + SupportsInsertIgnoreSyntax: false, + SupportsConflictTarget: true, + SupportsWithCTE: true, + SupportsWithCTERecursive: true, + SupportsDistinctOn: true, + WrapCompoundsInParens: true, + SupportsWindowFunction: true, + SupportsLateral: true, + + SupportsMultipleUpdateTables: true, + UseFromClauseForMultipleUpdateTables: true, + + UpdateClause: []byte("UPDATE"), + InsertClause: []byte("INSERT INTO"), + InsertIgnoreClause: []byte("INSERT IGNORE INTO"), + SelectClause: []byte("SELECT"), + DeleteClause: []byte("DELETE"), + TruncateClause: []byte("TRUNCATE"), + WithFragment: []byte("WITH "), + RecursiveFragment: []byte("RECURSIVE "), + CascadeFragment: []byte(" CASCADE"), + RestrictFragment: []byte(" RESTRICT"), + DefaultValuesFragment: []byte(" DEFAULT VALUES"), + ValuesFragment: []byte(" VALUES "), + IdentityFragment: []byte(" IDENTITY"), + SetFragment: []byte(" SET "), + DistinctFragment: []byte("DISTINCT"), + ReturningFragment: []byte(" RETURNING "), + FromFragment: []byte(" FROM"), + UsingFragment: []byte(" USING "), + OnFragment: []byte(" ON "), + WhereFragment: []byte(" WHERE "), + GroupByFragment: []byte(" GROUP BY "), + HavingFragment: []byte(" HAVING "), + WindowFragment: []byte(" WINDOW "), + WindowPartitionByFragment: []byte("PARTITION BY "), + WindowOrderByFragment: []byte("ORDER BY "), + WindowOverFragment: []byte(" OVER "), + OrderByFragment: []byte(" ORDER BY "), + FetchFragment: []byte(" "), + LimitFragment: []byte(" LIMIT "), + OffsetFragment: []byte(" OFFSET "), + ForUpdateFragment: []byte(" FOR UPDATE "), + ForNoKeyUpdateFragment: []byte(" FOR NO KEY UPDATE "), + ForShareFragment: []byte(" FOR SHARE "), + ForKeyShareFragment: []byte(" FOR KEY SHARE "), + OfFragment: []byte("OF "), + NowaitFragment: []byte("NOWAIT"), + SkipLockedFragment: []byte("SKIP LOCKED"), + LateralFragment: []byte("LATERAL "), + AsFragment: []byte(" AS "), + AscFragment: []byte(" ASC"), + DescFragment: []byte(" DESC"), + NullsFirstFragment: []byte(" NULLS FIRST"), + NullsLastFragment: []byte(" NULLS LAST"), + AndFragment: []byte(" AND "), + OrFragment: []byte(" OR "), + UnionFragment: []byte(" UNION "), + UnionAllFragment: []byte(" UNION ALL "), + IntersectFragment: []byte(" INTERSECT "), + IntersectAllFragment: []byte(" INTERSECT ALL "), + ConflictFragment: []byte(" ON CONFLICT"), + ConflictDoUpdateFragment: []byte(" DO UPDATE SET "), + ConflictDoNothingFragment: []byte(" DO NOTHING"), + CastFragment: []byte("CAST"), + CaseFragment: []byte("CASE "), + WhenFragment: []byte(" WHEN "), + ThenFragment: []byte(" THEN "), + ElseFragment: []byte(" ELSE "), + EndFragment: []byte(" END"), + Null: []byte("NULL"), + True: []byte("TRUE"), + False: []byte("FALSE"), + + PlaceHolderFragment: []byte("?"), + QuoteRune: '"', + StringQuote: '\'', + SetOperatorRune: '=', + 
CommaRune: ',', + SpaceRune: ' ', + LeftParenRune: '(', + RightParenRune: ')', + StarRune: '*', + PeriodRune: '.', + EmptyString: "", + + BooleanOperatorLookup: map[exp.BooleanOperation][]byte{ + exp.EqOp: []byte("="), + exp.NeqOp: []byte("!="), + exp.GtOp: []byte(">"), + exp.GteOp: []byte(">="), + exp.LtOp: []byte("<"), + exp.LteOp: []byte("<="), + exp.InOp: []byte("IN"), + exp.NotInOp: []byte("NOT IN"), + exp.IsOp: []byte("IS"), + exp.IsNotOp: []byte("IS NOT"), + exp.LikeOp: []byte("LIKE"), + exp.NotLikeOp: []byte("NOT LIKE"), + exp.ILikeOp: []byte("ILIKE"), + exp.NotILikeOp: []byte("NOT ILIKE"), + exp.RegexpLikeOp: []byte("~"), + exp.RegexpNotLikeOp: []byte("!~"), + exp.RegexpILikeOp: []byte("~*"), + exp.RegexpNotILikeOp: []byte("!~*"), + }, + BitwiseOperatorLookup: map[exp.BitwiseOperation][]byte{ + exp.BitwiseInversionOp: []byte("~"), + exp.BitwiseOrOp: []byte("|"), + exp.BitwiseAndOp: []byte("&"), + exp.BitwiseXorOp: []byte("#"), + exp.BitwiseLeftShiftOp: []byte("<<"), + exp.BitwiseRightShiftOp: []byte(">>"), + }, + RangeOperatorLookup: map[exp.RangeOperation][]byte{ + exp.BetweenOp: []byte("BETWEEN"), + exp.NotBetweenOp: []byte("NOT BETWEEN"), + }, + JoinTypeLookup: map[exp.JoinType][]byte{ + exp.InnerJoinType: []byte(" INNER JOIN "), + exp.FullOuterJoinType: []byte(" FULL OUTER JOIN "), + exp.RightOuterJoinType: []byte(" RIGHT OUTER JOIN "), + exp.LeftOuterJoinType: []byte(" LEFT OUTER JOIN "), + exp.FullJoinType: []byte(" FULL JOIN "), + exp.RightJoinType: []byte(" RIGHT JOIN "), + exp.LeftJoinType: []byte(" LEFT JOIN "), + exp.NaturalJoinType: []byte(" NATURAL JOIN "), + exp.NaturalLeftJoinType: []byte(" NATURAL LEFT JOIN "), + exp.NaturalRightJoinType: []byte(" NATURAL RIGHT JOIN "), + exp.NaturalFullJoinType: []byte(" NATURAL FULL JOIN "), + exp.CrossJoinType: []byte(" CROSS JOIN "), + }, + + TimeFormat: time.RFC3339Nano, + + BooleanDataTypeSupported: true, + UseLiteralIsBools: true, + + EscapedRunes: map[rune][]byte{ + '\'': []byte("''"), + }, + + SelectSQLOrder: []SQLFragmentType{ + CommonTableSQLFragment, + SelectSQLFragment, + FromSQLFragment, + JoinSQLFragment, + WhereSQLFragment, + GroupBySQLFragment, + HavingSQLFragment, + WindowSQLFragment, + CompoundsSQLFragment, + OrderSQLFragment, + LimitSQLFragment, + OffsetSQLFragment, + ForSQLFragment, + }, + UpdateSQLOrder: []SQLFragmentType{ + CommonTableSQLFragment, + UpdateBeginSQLFragment, + SourcesSQLFragment, + UpdateSQLFragment, + UpdateFromSQLFragment, + WhereSQLFragment, + OrderSQLFragment, + LimitSQLFragment, + ReturningSQLFragment, + }, + InsertSQLOrder: []SQLFragmentType{ + CommonTableSQLFragment, + InsertBeingSQLFragment, + IntoSQLFragment, + InsertSQLFragment, + ReturningSQLFragment, + }, + DeleteSQLOrder: []SQLFragmentType{ + CommonTableSQLFragment, + DeleteBeginSQLFragment, + FromSQLFragment, + WhereSQLFragment, + OrderSQLFragment, + LimitSQLFragment, + ReturningSQLFragment, + }, + TruncateSQLOrder: []SQLFragmentType{ + TruncateSQLFragment, + }, + } +} diff --git a/vendor/github.com/doug-martin/goqu/v9/sqlgen/sqlgen.go b/vendor/github.com/doug-martin/goqu/v9/sqlgen/sqlgen.go new file mode 100644 index 000000000..09757115b --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/sqlgen/sqlgen.go @@ -0,0 +1,15 @@ +package sqlgen + +import "time" + +var timeLocation = time.UTC + +// Set the location to use when interpolating time.Time instances. See https://golang.org/pkg/time/#LoadLocation +// NOTE: This has no effect when using prepared statements. 
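// A sketch of how these hooks combine in practice (the dialect name and
// column values are hypothetical, and goqu's top-level RegisterDialect and
// Dialect helpers are assumed to accept this options type): a driver package
// typically clones DefaultDialectOptions, tweaks the fields its engine
// needs, and registers the result, while SetTimeLocation below pins how
// interpolated time.Time values are formatted:
//
//	opts := sqlgen.DefaultDialectOptions()
//	opts.SupportsReturn = false        // engine without RETURNING
//	opts.PlaceHolderFragment = []byte("$")
//	opts.IncludePlaceholderNum = true  // $1, $2, ... placeholders
//	goqu.RegisterDialect("customdb", opts)
//
//	sqlgen.SetTimeLocation(time.UTC) // affects interpolated SQL only
//	sql, args, _ := goqu.Dialect("customdb").
//		From("events").
//		Where(goqu.C("created_at").Lt(time.Now())).
//		Prepared(true).
//		ToSQL()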
+func SetTimeLocation(loc *time.Location) { + timeLocation = loc +} + +func GetTimeLocation() *time.Location { + return timeLocation +} diff --git a/vendor/github.com/doug-martin/goqu/v9/sqlgen/truncate_sql_generator.go b/vendor/github.com/doug-martin/goqu/v9/sqlgen/truncate_sql_generator.go new file mode 100644 index 000000000..a2a6b41bb --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/sqlgen/truncate_sql_generator.go @@ -0,0 +1,64 @@ +package sqlgen + +import ( + "strings" + + "github.com/doug-martin/goqu/v9/exp" + "github.com/doug-martin/goqu/v9/internal/errors" + "github.com/doug-martin/goqu/v9/internal/sb" +) + +type ( + // An adapter interface to be used by a Dataset to generate SQL for a specific dialect. + // See DefaultAdapter for a concrete implementation and examples. + TruncateSQLGenerator interface { + Dialect() string + Generate(b sb.SQLBuilder, clauses exp.TruncateClauses) + } + // The default adapter. This class should be used when building a new adapter. When creating a new adapter you can + // either override methods, or more typically update default values. + // See (github.com/doug-martin/goqu/dialect/postgres) + truncateSQLGenerator struct { + CommonSQLGenerator + } +) + +var errNoSourceForTruncate = errors.New("no source found when generating truncate sql") + +func NewTruncateSQLGenerator(dialect string, do *SQLDialectOptions) TruncateSQLGenerator { + return &truncateSQLGenerator{NewCommonSQLGenerator(dialect, do)} +} + +func (tsg *truncateSQLGenerator) Generate(b sb.SQLBuilder, clauses exp.TruncateClauses) { + if !clauses.HasTable() { + b.SetError(errNoSourceForTruncate) + return + } + for _, f := range tsg.DialectOptions().TruncateSQLOrder { + if b.Error() != nil { + return + } + switch f { + case TruncateSQLFragment: + tsg.TruncateSQL(b, clauses.Table(), clauses.Options()) + default: + b.SetError(ErrNotSupportedFragment("TRUNCATE", f)) + } + } +} + +// Generates a TRUNCATE statement +func (tsg *truncateSQLGenerator) TruncateSQL(b sb.SQLBuilder, from exp.ColumnListExpression, opts exp.TruncateOptions) { + b.Write(tsg.DialectOptions().TruncateClause) + tsg.SourcesSQL(b, from) + if opts.Identity != tsg.DialectOptions().EmptyString { + b.WriteRunes(tsg.DialectOptions().SpaceRune). + WriteStrings(strings.ToUpper(opts.Identity)). + Write(tsg.DialectOptions().IdentityFragment) + } + if opts.Cascade { + b.Write(tsg.DialectOptions().CascadeFragment) + } else if opts.Restrict { + b.Write(tsg.DialectOptions().RestrictFragment) + } +} diff --git a/vendor/github.com/doug-martin/goqu/v9/sqlgen/update_sql_generator.go b/vendor/github.com/doug-martin/goqu/v9/sqlgen/update_sql_generator.go new file mode 100644 index 000000000..3bf7b88ef --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/sqlgen/update_sql_generator.go @@ -0,0 +1,112 @@ +package sqlgen + +import ( + "github.com/doug-martin/goqu/v9/exp" + "github.com/doug-martin/goqu/v9/internal/errors" + "github.com/doug-martin/goqu/v9/internal/sb" +) + +type ( + // An adapter interface to be used by a Dataset to generate SQL for a specific dialect. + // See DefaultAdapter for a concrete implementation and examples. + UpdateSQLGenerator interface { + Dialect() string + Generate(b sb.SQLBuilder, clauses exp.UpdateClauses) + } + // The default adapter. This class should be used when building a new adapter. When creating a new adapter you can + // either override methods, or more typically update default values. 
+ // See (github.com/doug-martin/goqu/dialect/postgres) + updateSQLGenerator struct { + CommonSQLGenerator + } +) + +var ( + ErrNoSourceForUpdate = errors.New("no source found when generating update sql") + ErrNoSetValuesForUpdate = errors.New("no set values found when generating UPDATE sql") +) + +func NewUpdateSQLGenerator(dialect string, do *SQLDialectOptions) UpdateSQLGenerator { + return &updateSQLGenerator{NewCommonSQLGenerator(dialect, do)} +} + +func (usg *updateSQLGenerator) Generate(b sb.SQLBuilder, clauses exp.UpdateClauses) { + if !clauses.HasTable() { + b.SetError(ErrNoSourceForUpdate) + return + } + if !clauses.HasSetValues() { + b.SetError(ErrNoSetValuesForUpdate) + return + } + if !usg.DialectOptions().SupportsMultipleUpdateTables && clauses.HasFrom() { + b.SetError(errors.New("%s dialect does not support multiple tables in UPDATE", usg.Dialect())) + } + updates, err := exp.NewUpdateExpressions(clauses.SetValues()) + if err != nil { + b.SetError(err) + return + } + for _, f := range usg.DialectOptions().UpdateSQLOrder { + if b.Error() != nil { + return + } + switch f { + case CommonTableSQLFragment: + usg.ExpressionSQLGenerator().Generate(b, clauses.CommonTables()) + case UpdateBeginSQLFragment: + usg.UpdateBeginSQL(b) + case SourcesSQLFragment: + usg.updateTableSQL(b, clauses) + case UpdateSQLFragment: + usg.UpdateExpressionsSQL(b, updates...) + case UpdateFromSQLFragment: + usg.updateFromSQL(b, clauses.From()) + case WhereSQLFragment: + usg.WhereSQL(b, clauses.Where()) + case OrderSQLFragment: + if usg.DialectOptions().SupportsOrderByOnUpdate { + usg.OrderSQL(b, clauses.Order()) + } + case LimitSQLFragment: + if usg.DialectOptions().SupportsLimitOnUpdate { + usg.LimitSQL(b, clauses.Limit()) + } + case ReturningSQLFragment: + usg.ReturningSQL(b, clauses.Returning()) + default: + b.SetError(ErrNotSupportedFragment("UPDATE", f)) + } + } +} + +// Adds the correct fragment to being an UPDATE statement +func (usg *updateSQLGenerator) UpdateBeginSQL(b sb.SQLBuilder) { + b.Write(usg.DialectOptions().UpdateClause) +} + +// Adds column setters in an update SET clause +func (usg *updateSQLGenerator) UpdateExpressionsSQL(b sb.SQLBuilder, updates ...exp.UpdateExpression) { + b.Write(usg.DialectOptions().SetFragment) + usg.UpdateExpressionSQL(b, updates...) 
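// For reference, the guards at the top of Generate surface as dataset-level
// errors rather than panics; a sketch (assuming the goqu top-level Update
// helper and a hypothetical table "t"):
//
//	_, _, err := goqu.Update("t").ToSQL()
//	// err: "no set values found when generating UPDATE sql"
//
//	_, _, err = goqu.Update("t").Set(goqu.Record{"a": 1}).ToSQL()
//	// err == nil; renders roughly: UPDATE "t" SET "a"=1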
+} + +func (usg *updateSQLGenerator) updateTableSQL(b sb.SQLBuilder, uc exp.UpdateClauses) { + b.WriteRunes(usg.DialectOptions().SpaceRune) + usg.ExpressionSQLGenerator().Generate(b, uc.Table()) + if uc.HasFrom() { + if !usg.DialectOptions().UseFromClauseForMultipleUpdateTables { + b.WriteRunes(usg.DialectOptions().CommaRune) + usg.ExpressionSQLGenerator().Generate(b, uc.From()) + } + } +} + +func (usg *updateSQLGenerator) updateFromSQL(b sb.SQLBuilder, ce exp.ColumnListExpression) { + if ce == nil || ce.IsEmpty() { + return + } + if usg.DialectOptions().UseFromClauseForMultipleUpdateTables { + usg.FromSQL(b, ce) + } +} diff --git a/vendor/github.com/doug-martin/goqu/v9/truncate_dataset.go b/vendor/github.com/doug-martin/goqu/v9/truncate_dataset.go new file mode 100644 index 000000000..fda8196bd --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/truncate_dataset.go @@ -0,0 +1,169 @@ +package goqu + +import ( + "github.com/doug-martin/goqu/v9/exec" + "github.com/doug-martin/goqu/v9/exp" + "github.com/doug-martin/goqu/v9/internal/sb" +) + +type TruncateDataset struct { + dialect SQLDialect + clauses exp.TruncateClauses + isPrepared prepared + queryFactory exec.QueryFactory + err error +} + +// used internally by database to create a database with a specific adapter +func newTruncateDataset(d string, queryFactory exec.QueryFactory) *TruncateDataset { + return &TruncateDataset{ + clauses: exp.NewTruncateClauses(), + dialect: GetDialect(d), + queryFactory: queryFactory, + } +} + +func Truncate(table ...interface{}) *TruncateDataset { + return newTruncateDataset("default", nil).Table(table...) +} + +// Sets the adapter used to serialize values and create the SQL statement +func (td *TruncateDataset) WithDialect(dl string) *TruncateDataset { + ds := td.copy(td.GetClauses()) + ds.dialect = GetDialect(dl) + return ds +} + +// Set the parameter interpolation behavior. See examples +// +// prepared: If true the dataset WILL NOT interpolate the parameters. +func (td *TruncateDataset) Prepared(prepared bool) *TruncateDataset { + ret := td.copy(td.clauses) + ret.isPrepared = preparedFromBool(prepared) + return ret +} + +func (td *TruncateDataset) IsPrepared() bool { + return td.isPrepared.Bool() +} + +// Returns the current adapter on the dataset +func (td *TruncateDataset) Dialect() SQLDialect { + return td.dialect +} + +// Returns the current adapter on the dataset +func (td *TruncateDataset) SetDialect(dialect SQLDialect) *TruncateDataset { + cd := td.copy(td.GetClauses()) + cd.dialect = dialect + return cd +} + +func (td *TruncateDataset) Expression() exp.Expression { + return td +} + +// Clones the dataset +func (td *TruncateDataset) Clone() exp.Expression { + return td.copy(td.clauses) +} + +// Returns the current clauses on the dataset. +func (td *TruncateDataset) GetClauses() exp.TruncateClauses { + return td.clauses +} + +// used interally to copy the dataset +func (td *TruncateDataset) copy(clauses exp.TruncateClauses) *TruncateDataset { + return &TruncateDataset{ + dialect: td.dialect, + clauses: clauses, + isPrepared: td.isPrepared, + queryFactory: td.queryFactory, + err: td.err, + } +} + +// Adds a FROM clause. This return a new dataset with the original sources replaced. See examples. +// You can pass in the following. 
+// string: Will automatically be turned into an identifier +// IdentifierExpression +// LiteralExpression: (See Literal) Will use the literal SQL +func (td *TruncateDataset) Table(table ...interface{}) *TruncateDataset { + return td.copy(td.clauses.SetTable(exp.NewColumnListExpression(table...))) +} + +// Adds a CASCADE clause +func (td *TruncateDataset) Cascade() *TruncateDataset { + opts := td.clauses.Options() + opts.Cascade = true + return td.copy(td.clauses.SetOptions(opts)) +} + +// Clears the CASCADE clause +func (td *TruncateDataset) NoCascade() *TruncateDataset { + opts := td.clauses.Options() + opts.Cascade = false + return td.copy(td.clauses.SetOptions(opts)) +} + +// Adds a RESTRICT clause +func (td *TruncateDataset) Restrict() *TruncateDataset { + opts := td.clauses.Options() + opts.Restrict = true + return td.copy(td.clauses.SetOptions(opts)) +} + +// Clears the RESTRICT clause +func (td *TruncateDataset) NoRestrict() *TruncateDataset { + opts := td.clauses.Options() + opts.Restrict = false + return td.copy(td.clauses.SetOptions(opts)) +} + +// Add a IDENTITY clause (e.g. RESTART) +func (td *TruncateDataset) Identity(identity string) *TruncateDataset { + opts := td.clauses.Options() + opts.Identity = identity + return td.copy(td.clauses.SetOptions(opts)) +} + +// Get any error that has been set or nil if no error has been set. +func (td *TruncateDataset) Error() error { + return td.err +} + +// Set an error on the dataset if one has not already been set. This error will be returned by a future call to Error +// or as part of ToSQL. This can be used by end users to record errors while building up queries without having to +// track those separately. +func (td *TruncateDataset) SetError(err error) *TruncateDataset { + if td.err == nil { + td.err = err + } + + return td +} + +// Generates a TRUNCATE sql statement, if Prepared has been called with true then the parameters will not be interpolated. +// See examples. 
+// +// Errors: +// * There is an error generating the SQL +func (td *TruncateDataset) ToSQL() (sql string, params []interface{}, err error) { + return td.truncateSQLBuilder().ToSQL() +} + +// Generates the TRUNCATE sql, and returns an Exec struct with the sql set to the TRUNCATE statement +// db.From("test").Truncate().Executor().Exec() +func (td *TruncateDataset) Executor() exec.QueryExecutor { + return td.queryFactory.FromSQLBuilder(td.truncateSQLBuilder()) +} + +func (td *TruncateDataset) truncateSQLBuilder() sb.SQLBuilder { + buf := sb.NewSQLBuilder(td.isPrepared.Bool()) + if td.err != nil { + return buf.SetError(td.err) + } + td.dialect.ToTruncateSQL(buf, td.clauses) + return buf +} diff --git a/vendor/github.com/doug-martin/goqu/v9/update_dataset.go b/vendor/github.com/doug-martin/goqu/v9/update_dataset.go new file mode 100644 index 000000000..e2eaa53bb --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/update_dataset.go @@ -0,0 +1,245 @@ +package goqu + +import ( + "github.com/doug-martin/goqu/v9/exec" + "github.com/doug-martin/goqu/v9/exp" + "github.com/doug-martin/goqu/v9/internal/errors" + "github.com/doug-martin/goqu/v9/internal/sb" +) + +type UpdateDataset struct { + dialect SQLDialect + clauses exp.UpdateClauses + isPrepared prepared + queryFactory exec.QueryFactory + err error +} + +var ErrUnsupportedUpdateTableType = errors.New("unsupported table type, a string or identifier expression is required") + +// used internally by database to create a database with a specific adapter +func newUpdateDataset(d string, queryFactory exec.QueryFactory) *UpdateDataset { + return &UpdateDataset{ + clauses: exp.NewUpdateClauses(), + dialect: GetDialect(d), + queryFactory: queryFactory, + } +} + +func Update(table interface{}) *UpdateDataset { + return newUpdateDataset("default", nil).Table(table) +} + +// Set the parameter interpolation behavior. See examples +// +// prepared: If true the dataset WILL NOT interpolate the parameters. +func (ud *UpdateDataset) Prepared(prepared bool) *UpdateDataset { + ret := ud.copy(ud.clauses) + ret.isPrepared = preparedFromBool(prepared) + return ret +} + +func (ud *UpdateDataset) IsPrepared() bool { + return ud.isPrepared.Bool() +} + +// Sets the adapter used to serialize values and create the SQL statement +func (ud *UpdateDataset) WithDialect(dl string) *UpdateDataset { + ds := ud.copy(ud.GetClauses()) + ds.dialect = GetDialect(dl) + return ds +} + +// Returns the current adapter on the dataset +func (ud *UpdateDataset) Dialect() SQLDialect { + return ud.dialect +} + +// Returns the current adapter on the dataset +func (ud *UpdateDataset) SetDialect(dialect SQLDialect) *UpdateDataset { + cd := ud.copy(ud.GetClauses()) + cd.dialect = dialect + return cd +} + +func (ud *UpdateDataset) Expression() exp.Expression { + return ud +} + +// Clones the dataset +func (ud *UpdateDataset) Clone() exp.Expression { + return ud.copy(ud.clauses) +} + +// Returns the current clauses on the dataset. +func (ud *UpdateDataset) GetClauses() exp.UpdateClauses { + return ud.clauses +} + +// used internally to copy the dataset +func (ud *UpdateDataset) copy(clauses exp.UpdateClauses) *UpdateDataset { + return &UpdateDataset{ + dialect: ud.dialect, + clauses: clauses, + isPrepared: ud.isPrepared, + queryFactory: ud.queryFactory, + err: ud.err, + } +} + +// Creates a WITH clause for a common table expression (CTE). 
+// +// The name will be available to use in the UPDATE from in the associated query; and can optionally +// contain a list of column names "name(col1, col2, col3)". +// +// The name will refer to the results of the specified subquery. +func (ud *UpdateDataset) With(name string, subquery exp.Expression) *UpdateDataset { + return ud.copy(ud.clauses.CommonTablesAppend(exp.NewCommonTableExpression(false, name, subquery))) +} + +// Creates a WITH RECURSIVE clause for a common table expression (CTE) +// +// The name will be available to use in the UPDATE from in the associated query; and must +// contain a list of column names "name(col1, col2, col3)" for a recursive clause. +// +// The name will refer to the results of the specified subquery. The subquery for +// a recursive query will always end with a UNION or UNION ALL with a clause that +// refers to the CTE by name. +func (ud *UpdateDataset) WithRecursive(name string, subquery exp.Expression) *UpdateDataset { + return ud.copy(ud.clauses.CommonTablesAppend(exp.NewCommonTableExpression(true, name, subquery))) +} + +// Sets the table to update. +func (ud *UpdateDataset) Table(table interface{}) *UpdateDataset { + switch t := table.(type) { + case exp.Expression: + return ud.copy(ud.clauses.SetTable(t)) + case string: + return ud.copy(ud.clauses.SetTable(exp.ParseIdentifier(t))) + default: + panic(ErrUnsupportedUpdateTableType) + } +} + +// Sets the values to use in the SET clause. See examples. +func (ud *UpdateDataset) Set(values interface{}) *UpdateDataset { + return ud.copy(ud.clauses.SetSetValues(values)) +} + +// Allows specifying other tables to reference in your update (If your dialect supports it). See examples. +func (ud *UpdateDataset) From(tables ...interface{}) *UpdateDataset { + return ud.copy(ud.clauses.SetFrom(exp.NewColumnListExpression(tables...))) +} + +// Adds a WHERE clause. See examples. +func (ud *UpdateDataset) Where(expressions ...exp.Expression) *UpdateDataset { + return ud.copy(ud.clauses.WhereAppend(expressions...)) +} + +// Removes the WHERE clause. See examples. +func (ud *UpdateDataset) ClearWhere() *UpdateDataset { + return ud.copy(ud.clauses.ClearWhere()) +} + +// Adds a ORDER clause. If the ORDER is currently set it replaces it. See examples. +func (ud *UpdateDataset) Order(order ...exp.OrderedExpression) *UpdateDataset { + return ud.copy(ud.clauses.SetOrder(order...)) +} + +// Adds a more columns to the current ORDER BY clause. If no order has be previously specified it is the same as +// calling Order. See examples. +func (ud *UpdateDataset) OrderAppend(order ...exp.OrderedExpression) *UpdateDataset { + return ud.copy(ud.clauses.OrderAppend(order...)) +} + +// Adds a more columns to the beginning of the current ORDER BY clause. If no order has be previously specified it is the same as +// calling Order. See examples. +func (ud *UpdateDataset) OrderPrepend(order ...exp.OrderedExpression) *UpdateDataset { + return ud.copy(ud.clauses.OrderPrepend(order...)) +} + +// Removes the ORDER BY clause. See examples. +func (ud *UpdateDataset) ClearOrder() *UpdateDataset { + return ud.copy(ud.clauses.ClearOrder()) +} + +// Adds a LIMIT clause. If the LIMIT is currently set it replaces it. See examples. +func (ud *UpdateDataset) Limit(limit uint) *UpdateDataset { + if limit > 0 { + return ud.copy(ud.clauses.SetLimit(limit)) + } + return ud.copy(ud.clauses.ClearLimit()) +} + +// Adds a LIMIT ALL clause. If the LIMIT is currently set it replaces it. See examples. 
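// Taken together, these are copy-on-write builders: each call returns a new
// dataset, so partially built queries can be shared safely. A usage sketch
// (table names and record values are hypothetical):
//
//	ds := goqu.Update("performers").
//		Set(goqu.Record{"name": "Alice", "rating": 5}).
//		Where(goqu.C("id").Eq(10))
//	sql, args, _ := ds.Prepared(true).ToSQL()
//	// sql (roughly): UPDATE "performers" SET "name"=?,"rating"=? WHERE ("id" = ?)
//	// args:          ["Alice", 5, 10]
//
// The TruncateDataset in the previous file follows the same pattern:
//
//	tsql, _, _ := goqu.Truncate("scene_markers").Cascade().ToSQL()
//	// tsql (roughly): TRUNCATE "scene_markers" CASCADE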
+func (ud *UpdateDataset) LimitAll() *UpdateDataset { + return ud.copy(ud.clauses.SetLimit(L("ALL"))) +} + +// Removes the LIMIT clause. +func (ud *UpdateDataset) ClearLimit() *UpdateDataset { + return ud.copy(ud.clauses.ClearLimit()) +} + +// Adds a RETURNING clause to the dataset if the adapter supports it. See examples. +func (ud *UpdateDataset) Returning(returning ...interface{}) *UpdateDataset { + return ud.copy(ud.clauses.SetReturning(exp.NewColumnListExpression(returning...))) +} + +// Get any error that has been set or nil if no error has been set. +func (ud *UpdateDataset) Error() error { + return ud.err +} + +// Set an error on the dataset if one has not already been set. This error will be returned by a future call to Error +// or as part of ToSQL. This can be used by end users to record errors while building up queries without having to +// track those separately. +func (ud *UpdateDataset) SetError(err error) *UpdateDataset { + if ud.err == nil { + ud.err = err + } + + return ud +} + +// Generates an UPDATE sql statement, if Prepared has been called with true then the parameters will not be interpolated. +// See examples. +// +// Errors: +// * There is an error generating the SQL +func (ud *UpdateDataset) ToSQL() (sql string, params []interface{}, err error) { + return ud.updateSQLBuilder().ToSQL() +} + +// Appends this Dataset's UPDATE statement to the SQLBuilder +// This is used internally when using updates in CTEs +func (ud *UpdateDataset) AppendSQL(b sb.SQLBuilder) { + if ud.err != nil { + b.SetError(ud.err) + return + } + ud.dialect.ToUpdateSQL(b, ud.GetClauses()) +} + +func (ud *UpdateDataset) GetAs() exp.IdentifierExpression { + return nil +} + +func (ud *UpdateDataset) ReturnsColumns() bool { + return ud.clauses.HasReturning() +} + +// Generates the UPDATE sql, and returns an exec.QueryExecutor with the sql set to the UPDATE statement +// db.Update("test").Set(Record{"name":"Bob", update: time.Now()}).Executor() +func (ud *UpdateDataset) Executor() exec.QueryExecutor { + return ud.queryFactory.FromSQLBuilder(ud.updateSQLBuilder()) +} + +func (ud *UpdateDataset) updateSQLBuilder() sb.SQLBuilder { + buf := sb.NewSQLBuilder(ud.isPrepared.Bool()) + if ud.err != nil { + return buf.SetError(ud.err) + } + ud.dialect.ToUpdateSQL(buf, ud.clauses) + return buf +} diff --git a/vendor/github.com/doug-martin/goqu/v9/wait-for-it.sh b/vendor/github.com/doug-martin/goqu/v9/wait-for-it.sh new file mode 100644 index 000000000..92cbdbb3c --- /dev/null +++ b/vendor/github.com/doug-martin/goqu/v9/wait-for-it.sh @@ -0,0 +1,182 @@ +#!/usr/bin/env bash +# Use this script to test if a given TCP host/port are available + +WAITFORIT_cmdname=${0##*/} + +echoerr() { if [[ $WAITFORIT_QUIET -ne 1 ]]; then echo "$@" 1>&2; fi } + +usage() +{ + cat << USAGE >&2 +Usage: + $WAITFORIT_cmdname host:port [-s] [-t timeout] [-- command args] + -h HOST | --host=HOST Host or IP under test + -p PORT | --port=PORT TCP port under test + Alternatively, you specify the host and port as host:port + -s | --strict Only execute subcommand if the test succeeds + -q | --quiet Don't output any status messages + -t TIMEOUT | --timeout=TIMEOUT + Timeout in seconds, zero for no timeout + -- COMMAND ARGS Execute command with args after the test finishes +USAGE + exit 1 +} + +wait_for() +{ + if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then + echoerr "$WAITFORIT_cmdname: waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT" + else + echoerr "$WAITFORIT_cmdname: waiting for $WAITFORIT_HOST:$WAITFORIT_PORT without a 
timeout" + fi + WAITFORIT_start_ts=$(date +%s) + while : + do + if [[ $WAITFORIT_ISBUSY -eq 1 ]]; then + nc -z $WAITFORIT_HOST $WAITFORIT_PORT + WAITFORIT_result=$? + else + (echo > /dev/tcp/$WAITFORIT_HOST/$WAITFORIT_PORT) >/dev/null 2>&1 + WAITFORIT_result=$? + fi + if [[ $WAITFORIT_result -eq 0 ]]; then + WAITFORIT_end_ts=$(date +%s) + echoerr "$WAITFORIT_cmdname: $WAITFORIT_HOST:$WAITFORIT_PORT is available after $((WAITFORIT_end_ts - WAITFORIT_start_ts)) seconds" + break + fi + sleep 1 + done + return $WAITFORIT_result +} + +wait_for_wrapper() +{ + # In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692 + if [[ $WAITFORIT_QUIET -eq 1 ]]; then + timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --quiet --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT & + else + timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT & + fi + WAITFORIT_PID=$! + trap "kill -INT -$WAITFORIT_PID" INT + wait $WAITFORIT_PID + WAITFORIT_RESULT=$? + if [[ $WAITFORIT_RESULT -ne 0 ]]; then + echoerr "$WAITFORIT_cmdname: timeout occurred after waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT" + fi + return $WAITFORIT_RESULT +} + +# process arguments +while [[ $# -gt 0 ]] +do + case "$1" in + *:* ) + WAITFORIT_hostport=(${1//:/ }) + WAITFORIT_HOST=${WAITFORIT_hostport[0]} + WAITFORIT_PORT=${WAITFORIT_hostport[1]} + shift 1 + ;; + --child) + WAITFORIT_CHILD=1 + shift 1 + ;; + -q | --quiet) + WAITFORIT_QUIET=1 + shift 1 + ;; + -s | --strict) + WAITFORIT_STRICT=1 + shift 1 + ;; + -h) + WAITFORIT_HOST="$2" + if [[ $WAITFORIT_HOST == "" ]]; then break; fi + shift 2 + ;; + --host=*) + WAITFORIT_HOST="${1#*=}" + shift 1 + ;; + -p) + WAITFORIT_PORT="$2" + if [[ $WAITFORIT_PORT == "" ]]; then break; fi + shift 2 + ;; + --port=*) + WAITFORIT_PORT="${1#*=}" + shift 1 + ;; + -t) + WAITFORIT_TIMEOUT="$2" + if [[ $WAITFORIT_TIMEOUT == "" ]]; then break; fi + shift 2 + ;; + --timeout=*) + WAITFORIT_TIMEOUT="${1#*=}" + shift 1 + ;; + --) + shift + WAITFORIT_CLI=("$@") + break + ;; + --help) + usage + ;; + *) + echoerr "Unknown argument: $1" + usage + ;; + esac +done + +if [[ "$WAITFORIT_HOST" == "" || "$WAITFORIT_PORT" == "" ]]; then + echoerr "Error: you need to provide a host and port to test." + usage +fi + +WAITFORIT_TIMEOUT=${WAITFORIT_TIMEOUT:-15} +WAITFORIT_STRICT=${WAITFORIT_STRICT:-0} +WAITFORIT_CHILD=${WAITFORIT_CHILD:-0} +WAITFORIT_QUIET=${WAITFORIT_QUIET:-0} + +# Check to see if timeout is from busybox? +WAITFORIT_TIMEOUT_PATH=$(type -p timeout) +WAITFORIT_TIMEOUT_PATH=$(realpath $WAITFORIT_TIMEOUT_PATH 2>/dev/null || readlink -f $WAITFORIT_TIMEOUT_PATH) + +WAITFORIT_BUSYTIMEFLAG="" +if [[ $WAITFORIT_TIMEOUT_PATH =~ "busybox" ]]; then + WAITFORIT_ISBUSY=1 + # Check if busybox timeout uses -t flag + # (recent Alpine versions don't support -t anymore) + if timeout &>/dev/stdout | grep -q -e '-t '; then + WAITFORIT_BUSYTIMEFLAG="-t" + fi +else + WAITFORIT_ISBUSY=0 +fi + +if [[ $WAITFORIT_CHILD -gt 0 ]]; then + wait_for + WAITFORIT_RESULT=$? + exit $WAITFORIT_RESULT +else + if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then + wait_for_wrapper + WAITFORIT_RESULT=$? + else + wait_for + WAITFORIT_RESULT=$? 
+ fi +fi + +if [[ $WAITFORIT_CLI != "" ]]; then + if [[ $WAITFORIT_RESULT -ne 0 && $WAITFORIT_STRICT -eq 1 ]]; then + echoerr "$WAITFORIT_cmdname: strict mode, refusing to execute subprocess" + exit $WAITFORIT_RESULT + fi + exec "${WAITFORIT_CLI[@]}" +else + exit $WAITFORIT_RESULT +fi \ No newline at end of file diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c index c1a4d8f92..a974948f8 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c @@ -1,7 +1,7 @@ #ifndef USE_LIBSQLITE3 /****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.34.0. By combining all the individual C code files into this +** version 3.35.4. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements @@ -285,6 +285,9 @@ static const char * const sqlite3azCompileOpt[] = { #ifdef SQLITE_ENABLE_LOCKING_STYLE "ENABLE_LOCKING_STYLE=" CTIMEOPT_VAL(SQLITE_ENABLE_LOCKING_STYLE), #endif +#if SQLITE_ENABLE_MATH_FUNCTIONS + "ENABLE_MATH_FUNCTIONS", +#endif #if SQLITE_ENABLE_MEMORY_MANAGEMENT "ENABLE_MEMORY_MANAGEMENT", #endif @@ -991,6 +994,18 @@ SQLITE_PRIVATE const char **sqlite3CompileOptions(int *pnOpt){ # define MSVC_VERSION 0 #endif +/* +** Some C99 functions in "math.h" are only present for MSVC when its version +** is associated with Visual Studio 2013 or higher. +*/ +#ifndef SQLITE_HAVE_C99_MATH_FUNCS +# if MSVC_VERSION==0 || MSVC_VERSION>=1800 +# define SQLITE_HAVE_C99_MATH_FUNCS (1) +# else +# define SQLITE_HAVE_C99_MATH_FUNCS (0) +# endif +#endif + /* Needed for various definitions... */ #if defined(__GNUC__) && !defined(_GNU_SOURCE) # define _GNU_SOURCE @@ -1172,9 +1187,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.34.0" -#define SQLITE_VERSION_NUMBER 3034000 -#define SQLITE_SOURCE_ID "2020-12-01 16:14:00 a26b6597e3ae272231b96f9982c3bcc17ddec2f2b6eb4df06a224b91089fed5b" +#define SQLITE_VERSION "3.35.4" +#define SQLITE_VERSION_NUMBER 3035004 +#define SQLITE_SOURCE_ID "2021-04-02 15:20:15 5d4c65779dab868b285519b19e4cf9d451d50c6048f06f653aa701ec212df45e" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -3164,7 +3179,13 @@ struct sqlite3_mem_methods { ** The second parameter is a pointer to an integer into which ** is written 0 or 1 to indicate whether triggers are disabled or enabled ** following this call. The second parameter may be a NULL pointer, in -** which case the trigger setting is not reported back. </dd> +** which case the trigger setting is not reported back. +** +** <p>Originally this option disabled all triggers. ^(However, since +** SQLite version 3.35.0, TEMP triggers are still allowed even if +** this option is off. 
So, in other words, this option now only disables +** triggers in the main database schema or in the schemas of ATTACH-ed +** databases.)^ </dd> ** ** [[SQLITE_DBCONFIG_ENABLE_VIEW]] ** <dt>SQLITE_DBCONFIG_ENABLE_VIEW</dt> @@ -3175,7 +3196,13 @@ struct sqlite3_mem_methods { ** The second parameter is a pointer to an integer into which ** is written 0 or 1 to indicate whether views are disabled or enabled ** following this call. The second parameter may be a NULL pointer, in -** which case the view setting is not reported back. </dd> +** which case the view setting is not reported back. +** +** <p>Originally this option disabled all views. ^(However, since +** SQLite version 3.35.0, TEMP views are still allowed even if +** this option is off. So, in other words, this option now only disables +** views in the main database schema or in the schemas of ATTACH-ed +** databases.)^ </dd> ** ** [[SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER]] ** <dt>SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER</dt> @@ -4548,6 +4575,7 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); ** that uses dot-files in place of posix advisory locking. ** <tr><td> file:data.db?mode=readonly <td> ** An error. "readonly" is not a valid option for the "mode" parameter. +** Use "ro" instead: "file:data.db?mode=ro". ** </table> ** ** ^URI hexadecimal escape sequences (%HH) are supported within the path and @@ -4746,7 +4774,7 @@ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*); ** If the Y parameter to sqlite3_free_filename(Y) is anything other ** than a NULL pointer or a pointer previously acquired from ** sqlite3_create_filename(), then bad things such as heap -** corruption or segfaults may occur. The value Y should be +** corruption or segfaults may occur. The value Y should not be ** used again after sqlite3_free_filename(Y) has been called. This means ** that if the [sqlite3_vfs.xOpen()] method of a VFS has been called using Y, ** then the corresponding [sqlite3_module.xClose() method should also be @@ -8814,7 +8842,8 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_PRNG_SEED 28 #define SQLITE_TESTCTRL_EXTRA_SCHEMA_CHECKS 29 #define SQLITE_TESTCTRL_SEEK_COUNT 30 -#define SQLITE_TESTCTRL_LAST 30 /* Largest TESTCTRL */ +#define SQLITE_TESTCTRL_TRACEFLAGS 31 +#define SQLITE_TESTCTRL_LAST 31 /* Largest TESTCTRL */ /* ** CAPI3REF: SQL Keyword Checking @@ -11487,6 +11516,14 @@ SQLITE_API int sqlite3session_patchset( */ SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession); +/* +** CAPI3REF: Query for the amount of heap memory used by a session object. +** +** This API returns the total amount of heap memory in bytes currently +** used by the session object passed as the only argument. +*/ +SQLITE_API sqlite3_int64 sqlite3session_memory_used(sqlite3_session *pSession); + /* ** CAPI3REF: Create An Iterator To Traverse A Changeset ** CONSTRUCTOR: sqlite3_changeset_iter @@ -11589,18 +11626,23 @@ SQLITE_API int sqlite3changeset_next(sqlite3_changeset_iter *pIter); ** call to [sqlite3changeset_next()] must have returned [SQLITE_ROW]. If this ** is not the case, this function returns [SQLITE_MISUSE]. ** -** If argument pzTab is not NULL, then *pzTab is set to point to a -** nul-terminated utf-8 encoded string containing the name of the table -** affected by the current change. The buffer remains valid until either -** sqlite3changeset_next() is called on the iterator or until the -** conflict-handler function returns. 
If pnCol is not NULL, then *pnCol is -** set to the number of columns in the table affected by the change. If -** pbIndirect is not NULL, then *pbIndirect is set to true (1) if the change +** Arguments pOp, pnCol and pzTab may not be NULL. Upon return, three +** outputs are set through these pointers: +** +** *pOp is set to one of [SQLITE_INSERT], [SQLITE_DELETE] or [SQLITE_UPDATE], +** depending on the type of change that the iterator currently points to; +** +** *pnCol is set to the number of columns in the table affected by the change; and +** +** *pzTab is set to point to a nul-terminated utf-8 encoded string containing +** the name of the table affected by the current change. The buffer remains +** valid until either sqlite3changeset_next() is called on the iterator +** or until the conflict-handler function returns. +** +** If pbIndirect is not NULL, then *pbIndirect is set to true (1) if the change ** is an indirect change, or false (0) otherwise. See the documentation for ** [sqlite3session_indirect()] for a description of direct and indirect -** changes. Finally, if pOp is not NULL, then *pOp is set to one of -** [SQLITE_INSERT], [SQLITE_DELETE] or [SQLITE_UPDATE], depending on the -** type of change that the iterator currently points to. +** changes. ** ** If no error occurs, SQLITE_OK is returned. If an error does occur, an ** SQLite error code is returned. The values of the output variables may not @@ -13529,7 +13571,8 @@ struct fts5_api { #ifndef __has_extension # define __has_extension(x) 0 /* compatibility with non-clang compilers */ #endif -#if GCC_VERSION>=4007000 || __has_extension(c_atomic) +#if GCC_VERSION>=4007000 || \ + (__has_extension(c_atomic) && __has_extension(c_atomic_store_n)) # define AtomicLoad(PTR) __atomic_load_n((PTR),__ATOMIC_RELAXED) # define AtomicStore(PTR,VAL) __atomic_store_n((PTR),(VAL),__ATOMIC_RELAXED) #else @@ -14096,90 +14139,92 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); #define TK_TIES 94 #define TK_GENERATED 95 #define TK_ALWAYS 96 -#define TK_REINDEX 97 -#define TK_RENAME 98 -#define TK_CTIME_KW 99 -#define TK_ANY 100 -#define TK_BITAND 101 -#define TK_BITOR 102 -#define TK_LSHIFT 103 -#define TK_RSHIFT 104 -#define TK_PLUS 105 -#define TK_MINUS 106 -#define TK_STAR 107 -#define TK_SLASH 108 -#define TK_REM 109 -#define TK_CONCAT 110 -#define TK_COLLATE 111 -#define TK_BITNOT 112 -#define TK_ON 113 -#define TK_INDEXED 114 -#define TK_STRING 115 -#define TK_JOIN_KW 116 -#define TK_CONSTRAINT 117 -#define TK_DEFAULT 118 -#define TK_NULL 119 -#define TK_PRIMARY 120 -#define TK_UNIQUE 121 -#define TK_CHECK 122 -#define TK_REFERENCES 123 -#define TK_AUTOINCR 124 -#define TK_INSERT 125 -#define TK_DELETE 126 -#define TK_UPDATE 127 -#define TK_SET 128 -#define TK_DEFERRABLE 129 -#define TK_FOREIGN 130 -#define TK_DROP 131 -#define TK_UNION 132 -#define TK_ALL 133 -#define TK_EXCEPT 134 -#define TK_INTERSECT 135 -#define TK_SELECT 136 -#define TK_VALUES 137 -#define TK_DISTINCT 138 -#define TK_DOT 139 -#define TK_FROM 140 -#define TK_JOIN 141 -#define TK_USING 142 -#define TK_ORDER 143 -#define TK_GROUP 144 -#define TK_HAVING 145 -#define TK_LIMIT 146 -#define TK_WHERE 147 -#define TK_INTO 148 -#define TK_NOTHING 149 -#define TK_FLOAT 150 -#define TK_BLOB 151 -#define TK_INTEGER 152 -#define TK_VARIABLE 153 -#define TK_CASE 154 -#define TK_WHEN 155 -#define TK_THEN 156 -#define TK_ELSE 157 -#define TK_INDEX 158 -#define TK_ALTER 159 -#define TK_ADD 160 -#define TK_WINDOW 161 -#define TK_OVER 162 -#define TK_FILTER 163 -#define TK_COLUMN 164 
-#define TK_AGG_FUNCTION 165 -#define TK_AGG_COLUMN 166 -#define TK_TRUEFALSE 167 -#define TK_ISNOT 168 -#define TK_FUNCTION 169 -#define TK_UMINUS 170 -#define TK_UPLUS 171 -#define TK_TRUTH 172 -#define TK_REGISTER 173 -#define TK_VECTOR 174 -#define TK_SELECT_COLUMN 175 -#define TK_IF_NULL_ROW 176 -#define TK_ASTERISK 177 -#define TK_SPAN 178 -#define TK_SPACE 179 -#define TK_ILLEGAL 180 +#define TK_MATERIALIZED 97 +#define TK_REINDEX 98 +#define TK_RENAME 99 +#define TK_CTIME_KW 100 +#define TK_ANY 101 +#define TK_BITAND 102 +#define TK_BITOR 103 +#define TK_LSHIFT 104 +#define TK_RSHIFT 105 +#define TK_PLUS 106 +#define TK_MINUS 107 +#define TK_STAR 108 +#define TK_SLASH 109 +#define TK_REM 110 +#define TK_CONCAT 111 +#define TK_COLLATE 112 +#define TK_BITNOT 113 +#define TK_ON 114 +#define TK_INDEXED 115 +#define TK_STRING 116 +#define TK_JOIN_KW 117 +#define TK_CONSTRAINT 118 +#define TK_DEFAULT 119 +#define TK_NULL 120 +#define TK_PRIMARY 121 +#define TK_UNIQUE 122 +#define TK_CHECK 123 +#define TK_REFERENCES 124 +#define TK_AUTOINCR 125 +#define TK_INSERT 126 +#define TK_DELETE 127 +#define TK_UPDATE 128 +#define TK_SET 129 +#define TK_DEFERRABLE 130 +#define TK_FOREIGN 131 +#define TK_DROP 132 +#define TK_UNION 133 +#define TK_ALL 134 +#define TK_EXCEPT 135 +#define TK_INTERSECT 136 +#define TK_SELECT 137 +#define TK_VALUES 138 +#define TK_DISTINCT 139 +#define TK_DOT 140 +#define TK_FROM 141 +#define TK_JOIN 142 +#define TK_USING 143 +#define TK_ORDER 144 +#define TK_GROUP 145 +#define TK_HAVING 146 +#define TK_LIMIT 147 +#define TK_WHERE 148 +#define TK_RETURNING 149 +#define TK_INTO 150 +#define TK_NOTHING 151 +#define TK_FLOAT 152 +#define TK_BLOB 153 +#define TK_INTEGER 154 +#define TK_VARIABLE 155 +#define TK_CASE 156 +#define TK_WHEN 157 +#define TK_THEN 158 +#define TK_ELSE 159 +#define TK_INDEX 160 +#define TK_ALTER 161 +#define TK_ADD 162 +#define TK_WINDOW 163 +#define TK_OVER 164 +#define TK_FILTER 165 +#define TK_COLUMN 166 +#define TK_AGG_FUNCTION 167 +#define TK_AGG_COLUMN 168 +#define TK_TRUEFALSE 169 +#define TK_ISNOT 170 +#define TK_FUNCTION 171 +#define TK_UMINUS 172 +#define TK_UPLUS 173 +#define TK_TRUTH 174 +#define TK_REGISTER 175 +#define TK_VECTOR 176 +#define TK_SELECT_COLUMN 177 +#define TK_IF_NULL_ROW 178 +#define TK_ASTERISK 179 +#define TK_SPAN 180 +#define TK_SPACE 181 +#define TK_ILLEGAL 182 /************** End of parse.h ***********************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ @@ -14595,15 +14640,14 @@ typedef INT16_TYPE LogEst; ** SELECTTRACE_ENABLED will be either 1 or 0 depending on whether or not ** the Select query generator tracing logic is turned on. 
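+** A hedged illustration of how the SELECTTRACE macro defined below is
+** invoked in debug builds (the 0x1 mask and the message are arbitrary
+** example values, not prescribed constants):
+**
+**     SELECTTRACE(0x1,pParse,p,("compound-select processing\n"));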
*/ -#if defined(SQLITE_ENABLE_SELECTTRACE) -# define SELECTTRACE_ENABLED 1 -#else -# define SELECTTRACE_ENABLED 0 +#if !defined(SQLITE_AMALGAMATION) +SQLITE_PRIVATE u32 sqlite3SelectTrace; #endif -#if defined(SQLITE_ENABLE_SELECTTRACE) +#if defined(SQLITE_DEBUG) \ + && (defined(SQLITE_TEST) || defined(SQLITE_ENABLE_SELECTTRACE)) # define SELECTTRACE_ENABLED 1 # define SELECTTRACE(K,P,S,X) \ - if(sqlite3_unsupported_selecttrace&(K)) \ + if(sqlite3SelectTrace&(K)) \ sqlite3DebugPrintf("%u/%d/%p: ",(S)->selId,(P)->addrExplain,(S)),\ sqlite3DebugPrintf X #else @@ -14611,6 +14655,19 @@ typedef INT16_TYPE LogEst; # define SELECTTRACE_ENABLED 0 #endif +/* +** Macros for "wheretrace" +*/ +SQLITE_PRIVATE u32 sqlite3WhereTrace; +#if defined(SQLITE_DEBUG) \ + && (defined(SQLITE_TEST) || defined(SQLITE_ENABLE_WHERETRACE)) +# define WHERETRACE(K,X) if(sqlite3WhereTrace&(K)) sqlite3DebugPrintf X +# define WHERETRACE_ENABLED 1 +#else +# define WHERETRACE(K,X) +#endif + + /* ** An instance of the following structure is used to store the busy-handler ** callback for a given sqlite handle. @@ -14722,7 +14779,10 @@ typedef struct AutoincInfo AutoincInfo; typedef struct Bitvec Bitvec; typedef struct CollSeq CollSeq; typedef struct Column Column; +typedef struct Cte Cte; +typedef struct CteUse CteUse; typedef struct Db Db; +typedef struct DbFixer DbFixer; typedef struct Schema Schema; typedef struct Expr Expr; typedef struct ExprList ExprList; @@ -14740,14 +14800,17 @@ typedef struct LookasideSlot LookasideSlot; typedef struct Module Module; typedef struct NameContext NameContext; typedef struct Parse Parse; +typedef struct ParseCleanup ParseCleanup; typedef struct PreUpdate PreUpdate; typedef struct PrintfArguments PrintfArguments; typedef struct RenameToken RenameToken; +typedef struct Returning Returning; typedef struct RowSet RowSet; typedef struct Savepoint Savepoint; typedef struct Select Select; typedef struct SQLiteThread SQLiteThread; typedef struct SelectDest SelectDest; +typedef struct SrcItem SrcItem; typedef struct SrcList SrcList; typedef struct sqlite3_str StrAccum; /* Internal alias for sqlite3_str */ typedef struct Table Table; @@ -15319,6 +15382,7 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor*, u8 flags); #define BTREE_SAVEPOSITION 0x02 /* Leave cursor pointing at NEXT or PREV */ #define BTREE_AUXDELETE 0x04 /* not the primary delete operation */ #define BTREE_APPEND 0x08 /* Insert is likely an append */ +#define BTREE_PREFORMAT 0x80 /* Inserted data is a preformatted cell */ /* An instance of the BtreePayload object describes the content of a single ** entry in either an index or table btree. @@ -15418,6 +15482,8 @@ SQLITE_PRIVATE void sqlite3BtreeCursorList(Btree*); SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree*, int, int *, int *); #endif +SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor*, BtCursor*, i64); + /* ** If we are not using shared cache, then there is no need to ** use mutexes to access the BtShared structures.
So make the @@ -15717,103 +15783,105 @@ typedef struct VdbeOpList VdbeOpList; #define OP_Copy 77 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */ #define OP_SCopy 78 /* synopsis: r[P2]=r[P1] */ #define OP_IntCopy 79 /* synopsis: r[P2]=r[P1] */ -#define OP_ResultRow 80 /* synopsis: output=r[P1@P2] */ -#define OP_CollSeq 81 -#define OP_AddImm 82 /* synopsis: r[P1]=r[P1]+P2 */ -#define OP_RealAffinity 83 -#define OP_Cast 84 /* synopsis: affinity(r[P1]) */ -#define OP_Permutation 85 -#define OP_Compare 86 /* synopsis: r[P1@P3] <-> r[P2@P3] */ -#define OP_IsTrue 87 /* synopsis: r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4 */ -#define OP_Offset 88 /* synopsis: r[P3] = sqlite_offset(P1) */ -#define OP_Column 89 /* synopsis: r[P3]=PX */ -#define OP_Affinity 90 /* synopsis: affinity(r[P1@P2]) */ -#define OP_MakeRecord 91 /* synopsis: r[P3]=mkrec(r[P1@P2]) */ -#define OP_Count 92 /* synopsis: r[P2]=count() */ -#define OP_ReadCookie 93 -#define OP_SetCookie 94 -#define OP_ReopenIdx 95 /* synopsis: root=P2 iDb=P3 */ -#define OP_OpenRead 96 /* synopsis: root=P2 iDb=P3 */ -#define OP_OpenWrite 97 /* synopsis: root=P2 iDb=P3 */ -#define OP_OpenDup 98 -#define OP_OpenAutoindex 99 /* synopsis: nColumn=P2 */ -#define OP_OpenEphemeral 100 /* synopsis: nColumn=P2 */ -#define OP_BitAnd 101 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */ -#define OP_BitOr 102 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */ -#define OP_ShiftLeft 103 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<<r[P1] */ -#define OP_ShiftRight 104 /* same as TK_RSHIFT, synopsis: r[P3]=r[P2]>>r[P1] */ -#define OP_Add 105 /* same as TK_PLUS, synopsis: r[P3]=r[P1]+r[P2] */ -#define OP_Subtract 106 /* same as TK_MINUS, synopsis: r[P3]=r[P2]-r[P1] */ -#define OP_Multiply 107 /* same as TK_STAR, synopsis: r[P3]=r[P1]*r[P2] */ -#define OP_Divide 108 /* same as TK_SLASH, synopsis: r[P3]=r[P2]/r[P1] */ -#define OP_Remainder 109 /* same as TK_REM, synopsis: r[P3]=r[P2]%r[P1] */ -#define OP_Concat 110 /* same as TK_CONCAT, synopsis: r[P3]=r[P2]+r[P1] */ -#define OP_SorterOpen 111 -#define OP_BitNot 112 /* same as TK_BITNOT, synopsis: r[P2]= ~r[P1] */ -#define OP_SequenceTest 113 /* synopsis: if( cursor[P1].ctr++ ) pc = P2 */ -#define OP_OpenPseudo 114 /* synopsis: P3 columns in r[P2] */ -#define OP_String8 115 /* same as TK_STRING, synopsis: r[P2]='P4' */ -#define OP_Close 116 -#define OP_ColumnsUsed 117 -#define OP_SeekScan 118 /* synopsis: Scan-ahead up to P1 rows */ -#define OP_SeekHit 119 /* synopsis: set P2<=seekHit<=P3 */ -#define OP_Sequence 120 /* synopsis: r[P2]=cursor[P1].ctr++ */ -#define OP_NewRowid 121 /* synopsis: r[P2]=rowid */ -#define OP_Insert 122 /* synopsis: intkey=r[P3] data=r[P2] */ -#define OP_Delete 123 -#define OP_ResetCount 124 -#define OP_SorterCompare 125 /* synopsis: if key(P1)!=trim(r[P3],P4) goto P2 */ -#define OP_SorterData 126 /* synopsis: r[P2]=data */ -#define OP_RowData 127 /* synopsis: r[P2]=data */ -#define OP_Rowid 128 /* synopsis: r[P2]=rowid */ -#define OP_NullRow 129 -#define OP_SeekEnd 130 -#define OP_IdxInsert 131 /* synopsis: key=r[P2] */ -#define OP_SorterInsert 132 /* synopsis: key=r[P2] */ -#define OP_IdxDelete 133 /* synopsis: key=r[P2@P3] */ -#define OP_DeferredSeek 134 /* synopsis: Move P3 to P1.rowid if needed */ -#define OP_IdxRowid 135 /* synopsis: r[P2]=rowid */ -#define OP_FinishSeek 136 -#define OP_Destroy 137 -#define OP_Clear 138 -#define OP_ResetSorter 139 -#define OP_CreateBtree 140 /* synopsis: r[P2]=root iDb=P1 flags=P3 */ -#define OP_SqlExec 141 -#define OP_ParseSchema 142 -#define OP_LoadAnalysis 143 
-#define OP_DropTable 144 -#define OP_DropIndex 145 -#define OP_DropTrigger 146 -#define OP_IntegrityCk 147 -#define OP_RowSetAdd 148 /* synopsis: rowset(P1)=r[P2] */ -#define OP_Param 149 -#define OP_Real 150 /* same as TK_FLOAT, synopsis: r[P2]=P4 */ -#define OP_FkCounter 151 /* synopsis: fkctr[P1]+=P2 */ -#define OP_MemMax 152 /* synopsis: r[P1]=max(r[P1],r[P2]) */ -#define OP_OffsetLimit 153 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */ -#define OP_AggInverse 154 /* synopsis: accum=r[P3] inverse(r[P2@P5]) */ -#define OP_AggStep 155 /* synopsis: accum=r[P3] step(r[P2@P5]) */ -#define OP_AggStep1 156 /* synopsis: accum=r[P3] step(r[P2@P5]) */ -#define OP_AggValue 157 /* synopsis: r[P3]=value N=P2 */ -#define OP_AggFinal 158 /* synopsis: accum=r[P1] N=P2 */ -#define OP_Expire 159 -#define OP_CursorLock 160 -#define OP_CursorUnlock 161 -#define OP_TableLock 162 /* synopsis: iDb=P1 root=P2 write=P3 */ -#define OP_VBegin 163 -#define OP_VCreate 164 -#define OP_VDestroy 165 -#define OP_VOpen 166 -#define OP_VColumn 167 /* synopsis: r[P3]=vcolumn(P2) */ -#define OP_VRename 168 -#define OP_Pagecount 169 -#define OP_MaxPgcnt 170 -#define OP_Trace 171 -#define OP_CursorHint 172 -#define OP_ReleaseReg 173 /* synopsis: release r[P1@P2] mask P3 */ -#define OP_Noop 174 -#define OP_Explain 175 -#define OP_Abortable 176 +#define OP_ChngCntRow 80 /* synopsis: output=r[P1] */ +#define OP_ResultRow 81 /* synopsis: output=r[P1@P2] */ +#define OP_CollSeq 82 +#define OP_AddImm 83 /* synopsis: r[P1]=r[P1]+P2 */ +#define OP_RealAffinity 84 +#define OP_Cast 85 /* synopsis: affinity(r[P1]) */ +#define OP_Permutation 86 +#define OP_Compare 87 /* synopsis: r[P1@P3] <-> r[P2@P3] */ +#define OP_IsTrue 88 /* synopsis: r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4 */ +#define OP_Offset 89 /* synopsis: r[P3] = sqlite_offset(P1) */ +#define OP_Column 90 /* synopsis: r[P3]=PX */ +#define OP_Affinity 91 /* synopsis: affinity(r[P1@P2]) */ +#define OP_MakeRecord 92 /* synopsis: r[P3]=mkrec(r[P1@P2]) */ +#define OP_Count 93 /* synopsis: r[P2]=count() */ +#define OP_ReadCookie 94 +#define OP_SetCookie 95 +#define OP_ReopenIdx 96 /* synopsis: root=P2 iDb=P3 */ +#define OP_OpenRead 97 /* synopsis: root=P2 iDb=P3 */ +#define OP_OpenWrite 98 /* synopsis: root=P2 iDb=P3 */ +#define OP_OpenDup 99 +#define OP_OpenAutoindex 100 /* synopsis: nColumn=P2 */ +#define OP_OpenEphemeral 101 /* synopsis: nColumn=P2 */ +#define OP_BitAnd 102 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */ +#define OP_BitOr 103 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */ +#define OP_ShiftLeft 104 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<<r[P1] */ +#define OP_ShiftRight 105 /* same as TK_RSHIFT, synopsis: r[P3]=r[P2]>>r[P1] */ +#define OP_Add 106 /* same as TK_PLUS, synopsis: r[P3]=r[P1]+r[P2] */ +#define OP_Subtract 107 /* same as TK_MINUS, synopsis: r[P3]=r[P2]-r[P1] */ +#define OP_Multiply 108 /* same as TK_STAR, synopsis: r[P3]=r[P1]*r[P2] */ +#define OP_Divide 109 /* same as TK_SLASH, synopsis: r[P3]=r[P2]/r[P1] */ +#define OP_Remainder 110 /* same as TK_REM, synopsis: r[P3]=r[P2]%r[P1] */ +#define OP_Concat 111 /* same as TK_CONCAT, synopsis: r[P3]=r[P2]+r[P1] */ +#define OP_SorterOpen 112 +#define OP_BitNot 113 /* same as TK_BITNOT, synopsis: r[P2]= ~r[P1] */ +#define OP_SequenceTest 114 /* synopsis: if( cursor[P1].ctr++ ) pc = P2 */ +#define OP_OpenPseudo 115 /* synopsis: P3 columns in r[P2] */ +#define OP_String8 116 /* same as TK_STRING, synopsis: r[P2]='P4' */ +#define OP_Close 117 +#define OP_ColumnsUsed 118 
+#define OP_SeekScan 119 /* synopsis: Scan-ahead up to P1 rows */ +#define OP_SeekHit 120 /* synopsis: set P2<=seekHit<=P3 */ +#define OP_Sequence 121 /* synopsis: r[P2]=cursor[P1].ctr++ */ +#define OP_NewRowid 122 /* synopsis: r[P2]=rowid */ +#define OP_Insert 123 /* synopsis: intkey=r[P3] data=r[P2] */ +#define OP_RowCell 124 +#define OP_Delete 125 +#define OP_ResetCount 126 +#define OP_SorterCompare 127 /* synopsis: if key(P1)!=trim(r[P3],P4) goto P2 */ +#define OP_SorterData 128 /* synopsis: r[P2]=data */ +#define OP_RowData 129 /* synopsis: r[P2]=data */ +#define OP_Rowid 130 /* synopsis: r[P2]=rowid */ +#define OP_NullRow 131 +#define OP_SeekEnd 132 +#define OP_IdxInsert 133 /* synopsis: key=r[P2] */ +#define OP_SorterInsert 134 /* synopsis: key=r[P2] */ +#define OP_IdxDelete 135 /* synopsis: key=r[P2@P3] */ +#define OP_DeferredSeek 136 /* synopsis: Move P3 to P1.rowid if needed */ +#define OP_IdxRowid 137 /* synopsis: r[P2]=rowid */ +#define OP_FinishSeek 138 +#define OP_Destroy 139 +#define OP_Clear 140 +#define OP_ResetSorter 141 +#define OP_CreateBtree 142 /* synopsis: r[P2]=root iDb=P1 flags=P3 */ +#define OP_SqlExec 143 +#define OP_ParseSchema 144 +#define OP_LoadAnalysis 145 +#define OP_DropTable 146 +#define OP_DropIndex 147 +#define OP_DropTrigger 148 +#define OP_IntegrityCk 149 +#define OP_RowSetAdd 150 /* synopsis: rowset(P1)=r[P2] */ +#define OP_Param 151 +#define OP_Real 152 /* same as TK_FLOAT, synopsis: r[P2]=P4 */ +#define OP_FkCounter 153 /* synopsis: fkctr[P1]+=P2 */ +#define OP_MemMax 154 /* synopsis: r[P1]=max(r[P1],r[P2]) */ +#define OP_OffsetLimit 155 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */ +#define OP_AggInverse 156 /* synopsis: accum=r[P3] inverse(r[P2@P5]) */ +#define OP_AggStep 157 /* synopsis: accum=r[P3] step(r[P2@P5]) */ +#define OP_AggStep1 158 /* synopsis: accum=r[P3] step(r[P2@P5]) */ +#define OP_AggValue 159 /* synopsis: r[P3]=value N=P2 */ +#define OP_AggFinal 160 /* synopsis: accum=r[P1] N=P2 */ +#define OP_Expire 161 +#define OP_CursorLock 162 +#define OP_CursorUnlock 163 +#define OP_TableLock 164 /* synopsis: iDb=P1 root=P2 write=P3 */ +#define OP_VBegin 165 +#define OP_VCreate 166 +#define OP_VDestroy 167 +#define OP_VOpen 168 +#define OP_VColumn 169 /* synopsis: r[P3]=vcolumn(P2) */ +#define OP_VRename 170 +#define OP_Pagecount 171 +#define OP_MaxPgcnt 172 +#define OP_Trace 173 +#define OP_CursorHint 174 +#define OP_ReleaseReg 175 /* synopsis: release r[P1@P2] mask P3 */ +#define OP_Noop 176 +#define OP_Explain 177 +#define OP_Abortable 178 /* Properties such as "out2" or "jump" that are specified in ** comments following the "case" for each opcode in the vdbe.c @@ -15836,19 +15904,19 @@ typedef struct VdbeOpList VdbeOpList; /* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x01, 0x01, 0x01, 0x00,\ /* 64 */ 0x00, 0x02, 0x02, 0x08, 0x00, 0x10, 0x10, 0x10,\ /* 72 */ 0x10, 0x00, 0x10, 0x10, 0x00, 0x00, 0x10, 0x10,\ -/* 80 */ 0x00, 0x00, 0x02, 0x02, 0x02, 0x00, 0x00, 0x12,\ -/* 88 */ 0x20, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00,\ -/* 96 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x26, 0x26,\ -/* 104 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x00,\ -/* 112 */ 0x12, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,\ -/* 120 */ 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ -/* 128 */ 0x10, 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x10,\ -/* 136 */ 0x00, 0x10, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,\ -/* 144 */ 0x00, 0x00, 0x00, 0x00, 0x06, 0x10, 0x10, 0x00,\ -/* 152 */ 0x04, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ +/* 80 */ 0x00, 0x00, 0x00, 
0x02, 0x02, 0x02, 0x00, 0x00,\ +/* 88 */ 0x12, 0x20, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00,\ +/* 96 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x26,\ +/* 104 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26,\ +/* 112 */ 0x00, 0x12, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,\ +/* 120 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,\ +/* 128 */ 0x00, 0x00, 0x10, 0x00, 0x00, 0x04, 0x04, 0x00,\ +/* 136 */ 0x00, 0x10, 0x00, 0x10, 0x00, 0x00, 0x10, 0x00,\ +/* 144 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x10,\ +/* 152 */ 0x10, 0x00, 0x04, 0x1a, 0x00, 0x00, 0x00, 0x00,\ /* 160 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ -/* 168 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,\ -/* 176 */ 0x00,} +/* 168 */ 0x00, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00, 0x00,\ +/* 176 */ 0x00, 0x00, 0x00,} /* The sqlite3P2Values() routine is able to run faster if it knows ** the value of the largest JUMP opcode. The smaller the maximum @@ -15916,7 +15984,7 @@ SQLITE_PRIVATE void sqlite3ExplainBreakpoint(const char*,const char*); #else # define sqlite3ExplainBreakpoint(A,B) /*no-op*/ #endif -SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe*,int,char*); +SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe*, int, char*, u16); SQLITE_PRIVATE void sqlite3VdbeChangeOpcode(Vdbe*, int addr, u8); SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe*, int addr, int P1); SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe*, int addr, int P2); @@ -16888,6 +16956,11 @@ SQLITE_PRIVATE void sqlite3CryptFunc(sqlite3_context*,int,sqlite3_value**); #endif /* SQLITE_OMIT_DEPRECATED */ #define SQLITE_TRACE_NONLEGACY_MASK 0x0f /* Normal flags */ +/* +** Maximum number of sqlite3.aDb[] entries. This is the number of attached +** databases plus 2 for "main" and "temp". +*/ +#define SQLITE_MAX_DB (SQLITE_MAX_ATTACHED+2) /* ** Each database connection is an instance of the following structure. @@ -16908,7 +16981,7 @@ struct sqlite3 { int errCode; /* Most recent error code (SQLITE_*) */ int errMask; /* & result codes with this before returning */ int iSysErrno; /* Errno value from last system error */ - u16 dbOptFlags; /* Flags to enable/disable optimizations */ + u32 dbOptFlags; /* Flags to enable/disable optimizations */ u8 enc; /* Text encoding */ u8 autoCommit; /* The auto-commit flag. */ u8 temp_store; /* 1: file 2: memory 0: default */ @@ -16935,7 +17008,10 @@ struct sqlite3 { unsigned orphanTrigger : 1; /* Last statement is orphaned TEMP trigger */ unsigned imposterTable : 1; /* Building an imposter table */ unsigned reopenMemdb : 1; /* ATTACH is really a reopen using MemDB */ + unsigned bDropColumn : 1; /* Doing schema check after DROP COLUMN */ char **azInit; /* "type", "name", and "tbl_name" columns */ + /* or if bDropColumn, then azInit[0] is the */ + /* name of the column being dropped */ } init; int nVdbeActive; /* Number of VDBEs currently running */ int nVdbeRead; /* Number of active VDBEs that read or write */ @@ -17115,24 +17191,26 @@ struct sqlite3 { ** sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS,...) interface to ** selectively disable various optimizations. 
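+** For example, a test program might switch off the min/max optimization
+** that is new in this release (a hedged sketch; the SQLITE_MinMaxOpt mask
+** is defined just below):
+**
+**     sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS, db, SQLITE_MinMaxOpt);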
*/ -#define SQLITE_QueryFlattener 0x0001 /* Query flattening */ -#define SQLITE_WindowFunc 0x0002 /* Use xInverse for window functions */ -#define SQLITE_GroupByOrder 0x0004 /* GROUPBY cover of ORDERBY */ -#define SQLITE_FactorOutConst 0x0008 /* Constant factoring */ -#define SQLITE_DistinctOpt 0x0010 /* DISTINCT using indexes */ -#define SQLITE_CoverIdxScan 0x0020 /* Covering index scans */ -#define SQLITE_OrderByIdxJoin 0x0040 /* ORDER BY of joins via index */ -#define SQLITE_Transitive 0x0080 /* Transitive constraints */ -#define SQLITE_OmitNoopJoin 0x0100 /* Omit unused tables in joins */ -#define SQLITE_CountOfView 0x0200 /* The count-of-view optimization */ -#define SQLITE_CursorHints 0x0400 /* Add OP_CursorHint opcodes */ -#define SQLITE_Stat4 0x0800 /* Use STAT4 data */ - /* TH3 expects the Stat4 ^^^^^^ value to be 0x0800. Don't change it */ -#define SQLITE_PushDown 0x1000 /* The push-down optimization */ -#define SQLITE_SimplifyJoin 0x2000 /* Convert LEFT JOIN to JOIN */ -#define SQLITE_SkipScan 0x4000 /* Skip-scans */ -#define SQLITE_PropagateConst 0x8000 /* The constant propagation opt */ -#define SQLITE_AllOpts 0xffff /* All optimizations */ +#define SQLITE_QueryFlattener 0x00000001 /* Query flattening */ +#define SQLITE_WindowFunc 0x00000002 /* Use xInverse for window functions */ +#define SQLITE_GroupByOrder 0x00000004 /* GROUPBY cover of ORDERBY */ +#define SQLITE_FactorOutConst 0x00000008 /* Constant factoring */ +#define SQLITE_DistinctOpt 0x00000010 /* DISTINCT using indexes */ +#define SQLITE_CoverIdxScan 0x00000020 /* Covering index scans */ +#define SQLITE_OrderByIdxJoin 0x00000040 /* ORDER BY of joins via index */ +#define SQLITE_Transitive 0x00000080 /* Transitive constraints */ +#define SQLITE_OmitNoopJoin 0x00000100 /* Omit unused tables in joins */ +#define SQLITE_CountOfView 0x00000200 /* The count-of-view optimization */ +#define SQLITE_CursorHints 0x00000400 /* Add OP_CursorHint opcodes */ +#define SQLITE_Stat4 0x00000800 /* Use STAT4 data */ + /* TH3 expects this value ^^^^^^^^^^ to be 0x0000800. Don't change it */ +#define SQLITE_PushDown 0x00001000 /* The push-down optimization */ +#define SQLITE_SimplifyJoin 0x00002000 /* Convert LEFT JOIN to JOIN */ +#define SQLITE_SkipScan 0x00004000 /* Skip-scans */ +#define SQLITE_PropagateConst 0x00008000 /* The constant propagation opt */ +#define SQLITE_MinMaxOpt 0x00010000 /* The min/max optimization */ +#define SQLITE_ExistsToIN 0x00020000 /* The EXISTS-to-IN optimization */ +#define SQLITE_AllOpts 0xffffffff /* All optimizations */ /* ** Macros for testing whether or not optimizations are enabled or disabled. @@ -17288,6 +17366,9 @@ struct FuncDestructor { ** a single query. The iArg is ignored. The user-data is always set ** to a NULL pointer. The bNC parameter is not used. ** +** MFUNCTION(zName, nArg, xPtr, xFunc) +** For math-library functions. xPtr is an arbitrary pointer. +** ** PURE_DATE(zName, nArg, iArg, bNC, xFunc) ** Used for "pure" date/time functions, this macro is like DFUNCTION ** except that it does set the SQLITE_FUNC_CONSTANT flags. 
iArg is @@ -17323,6 +17404,9 @@ struct FuncDestructor { #define SFUNCTION(zName, nArg, iArg, bNC, xFunc) \ {nArg, SQLITE_UTF8|SQLITE_DIRECTONLY|SQLITE_FUNC_UNSAFE, \ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} } +#define MFUNCTION(zName, nArg, xPtr, xFunc) \ + {nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \ + xPtr, 0, xFunc, 0, 0, 0, #zName, {0} } #define INLINE_FUNC(zName, nArg, iArg, mFlags) \ {nArg, SQLITE_UTF8|SQLITE_FUNC_INLINE|SQLITE_FUNC_CONSTANT|(mFlags), \ SQLITE_INT_TO_PTR(iArg), 0, noopFunc, 0, 0, 0, #zName, {0} } @@ -17417,7 +17501,12 @@ struct Column { u16 colFlags; /* Boolean properties. See COLFLAG_ defines below */ }; -/* Allowed values for Column.colFlags: +/* Allowed values for Column.colFlags. +** +** Constraints: +** TF_HasVirtual == COLFLAG_VIRTUAL +** TF_HasStored == COLFLAG_STORED +** TF_HasHidden == COLFLAG_HIDDEN */ #define COLFLAG_PRIMKEY 0x0001 /* Column is part of the primary key */ #define COLFLAG_HIDDEN 0x0002 /* A hidden column in a virtual table */ @@ -17593,7 +17682,6 @@ struct Table { #endif Trigger *pTrigger; /* List of triggers stored in pSchema */ Schema *pSchema; /* Schema that contains this table */ - Table *pNextZombie; /* Next on the Parse.pZombieTab list */ }; /* @@ -17607,11 +17695,12 @@ struct Table { ** ** Constraints: ** -** TF_HasVirtual == COLFLAG_Virtual -** TF_HasStored == COLFLAG_Stored +** TF_HasVirtual == COLFLAG_VIRTUAL +** TF_HasStored == COLFLAG_STORED +** TF_HasHidden == COLFLAG_HIDDEN */ #define TF_Readonly 0x0001 /* Read-only system table */ -#define TF_Ephemeral 0x0002 /* An ephemeral table */ +#define TF_HasHidden 0x0002 /* Has one or more hidden columns */ #define TF_HasPrimaryKey 0x0004 /* Table has a primary key */ #define TF_Autoincrement 0x0008 /* Integer primary key is autoincrement */ #define TF_HasStat1 0x0010 /* nRowLogEst set from sqlite_stat1 */ @@ -17626,6 +17715,7 @@ struct Table { #define TF_HasNotNull 0x0800 /* Contains NOT NULL constraints */ #define TF_Shadow 0x1000 /* True for a shadow table */ #define TF_HasStat4 0x2000 /* STAT4 info available for this table */ +#define TF_Ephemeral 0x4000 /* An ephemeral table */ /* ** Test to see whether or not a table is a virtual table. This is @@ -17722,16 +17812,22 @@ struct FKey { ** is returned. REPLACE means that preexisting database rows that caused ** a UNIQUE constraint violation are removed so that the new insert or ** update can proceed. Processing continues and no error is reported. +** UPDATE applies to insert operations only and means that the insert +** is omitted and the DO UPDATE clause of an upsert is run instead. ** -** RESTRICT, SETNULL, and CASCADE actions apply only to foreign keys. +** RESTRICT, SETNULL, SETDFLT, and CASCADE actions apply only to foreign keys. ** RESTRICT is the same as ABORT for IMMEDIATE foreign keys and the ** same as ROLLBACK for DEFERRED keys. SETNULL means that the foreign -** key is set to NULL. CASCADE means that a DELETE or UPDATE of the +** key is set to NULL. SETDFLT means that the foreign key is set +** to its default value. CASCADE means that a DELETE or UPDATE of the ** referenced table row is propagated into the row that holds the ** foreign key. ** +** The OE_Default value is a place holder that means to use whatever +** conflict resolution algorithm is required from context. +** +** The following symbolic values are used to record which type -** of action to take. +** of conflict resolution action to take.
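+**
+** As a hedged illustration, the UPDATE action described above is what an
+** upsert like the following requests (table t and columns a,b are
+** hypothetical; chaining multiple ON CONFLICT clauses is new in 3.35):
+**
+**     INSERT INTO t(a,b) VALUES(1,2)
+**       ON CONFLICT(a) DO UPDATE SET b=excluded.b
+**       ON CONFLICT DO NOTHING;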
*/ #define OE_None 0 /* There is no constraint to check */ #define OE_Rollback 1 /* Fail the operation and rollback the transaction */ @@ -17988,7 +18084,6 @@ struct AggInfo { } *aFunc; int nFunc; /* Number of entries in aFunc[] */ u32 selId; /* Select to which this AggInfo belongs */ - AggInfo *pNext; /* Next in list of them all */ }; /* @@ -18117,7 +18212,7 @@ struct Expr { ** TK_VARIABLE: variable number (always >= 1). ** TK_SELECT_COLUMN: column of the result vector */ i16 iAgg; /* Which entry in pAggInfo->aCol[] or ->aFunc[] */ - i16 iRightJoinTable; /* If EP_FromJoin, the right table of the join */ + int iRightJoinTable; /* If EP_FromJoin, the right table of the join */ AggInfo *pAggInfo; /* Used by TK_AGG_COLUMN and TK_AGG_FUNCTION */ union { Table *pTab; /* TK_COLUMN: Table containing column. Can be NULL @@ -18159,7 +18254,7 @@ struct Expr { #define EP_ConstFunc 0x080000 /* A SQLITE_FUNC_CONSTANT or _SLOCHNG function */ #define EP_CanBeNull 0x100000 /* Can be null despite NOT NULL constraint */ #define EP_Subquery 0x200000 /* Tree contains a TK_SELECT operator */ -#define EP_Alias 0x400000 /* Is an alias for a result set column */ + /* 0x400000 // Available */ #define EP_Leaf 0x800000 /* Expr.pLeft, .pRight, .u.pSelect all NULL */ #define EP_WinFunc 0x1000000 /* TK_FUNCTION with Expr.y.pWin set */ #define EP_Subrtn 0x2000000 /* Uses Expr.y.sub. TK_IN, _SELECT, or _EXISTS */ @@ -18307,6 +18402,45 @@ struct IdList { int nId; /* Number of identifiers on the list */ }; +/* +** The SrcItem object represents a single term in the FROM clause of a query. +** The SrcList object is mostly an array of SrcItems. +*/ +struct SrcItem { + Schema *pSchema; /* Schema to which this item is fixed */ + char *zDatabase; /* Name of database holding this table */ + char *zName; /* Name of the table */ + char *zAlias; /* The "B" part of a "A AS B" phrase. zName is the "A" */ + Table *pTab; /* An SQL table corresponding to zName */ + Select *pSelect; /* A SELECT statement used in place of a table name */ + int addrFillSub; /* Address of subroutine to manifest a subquery */ + int regReturn; /* Register holding return address of addrFillSub */ + int regResult; /* Registers holding results of a co-routine */ + struct { + u8 jointype; /* Type of join between this table and the previous */ + unsigned notIndexed :1; /* True if there is a NOT INDEXED clause */ + unsigned isIndexedBy :1; /* True if there is an INDEXED BY clause */ + unsigned isTabFunc :1; /* True if table-valued-function syntax */ + unsigned isCorrelated :1; /* True if sub-query is correlated */ + unsigned viaCoroutine :1; /* Implemented as a co-routine */ + unsigned isRecursive :1; /* True for recursive reference in WITH */ + unsigned fromDDL :1; /* Comes from sqlite_schema */ + unsigned isCte :1; /* This is a CTE */ + } fg; + int iCursor; /* The VDBE cursor number used to access this table */ + Expr *pOn; /* The ON clause of a join */ + IdList *pUsing; /* The USING clause of a join */ + Bitmask colUsed; /* Bit N (1<<N) set if column N of pTab is used */ + union { + char *zIndexedBy; /* Identifier from "INDEXED BY <zIndex>" clause */ + ExprList *pFuncArg; /* Arguments to table-valued-function */ + } u1; + union { + Index *pIBIndex; /* Index structure corresponding to u1.zIndexedBy */ + CteUse *pCteUse; /* CTE Usage info when fg.isCte is true */ + } u2; +}; + /* ** The following structure describes the FROM clause of a SELECT statement.
** Each table or subquery in the FROM clause is a separate element of @@ -18329,36 +18463,7 @@ struct IdList { struct SrcList { int nSrc; /* Number of tables or subqueries in the FROM clause */ u32 nAlloc; /* Number of entries allocated in a[] below */ - struct SrcList_item { - Schema *pSchema; /* Schema to which this item is fixed */ - char *zDatabase; /* Name of database holding this table */ - char *zName; /* Name of the table */ - char *zAlias; /* The "B" part of a "A AS B" phrase. zName is the "A" */ - Table *pTab; /* An SQL table corresponding to zName */ - Select *pSelect; /* A SELECT statement used in place of a table name */ - int addrFillSub; /* Address of subroutine to manifest a subquery */ - int regReturn; /* Register holding return address of addrFillSub */ - int regResult; /* Registers holding results of a co-routine */ - struct { - u8 jointype; /* Type of join between this table and the previous */ - unsigned notIndexed :1; /* True if there is a NOT INDEXED clause */ - unsigned isIndexedBy :1; /* True if there is an INDEXED BY clause */ - unsigned isTabFunc :1; /* True if table-valued-function syntax */ - unsigned isCorrelated :1; /* True if sub-query is correlated */ - unsigned viaCoroutine :1; /* Implemented as a co-routine */ - unsigned isRecursive :1; /* True for recursive reference in WITH */ - unsigned fromDDL :1; /* Comes from sqlite_schema */ - } fg; - int iCursor; /* The VDBE cursor number used to access this table */ - Expr *pOn; /* The ON clause of a join */ - IdList *pUsing; /* The USING clause of a join */ - Bitmask colUsed; /* Bit N (1<<N) set if column N of pTab is used */ - union { - char *zIndexedBy; /* Identifier from "INDEXED BY <zIndex>" clause */ - ExprList *pFuncArg; /* Arguments to table-valued-function */ - } u1; - Index *pIBIndex; /* Index structure corresponding to u1.zIndexedBy */ - } a[1]; /* One entry for each identifier on the list */ + SrcItem a[1]; /* One entry for each identifier on the list */ }; /* @@ -18434,6 +18539,7 @@ struct NameContext { ExprList *pEList; /* Optional list of result-set columns */ AggInfo *pAggInfo; /* Information about aggregates at this level */ Upsert *pUpsert; /* ON CONFLICT clause information from an upsert */ + int iBaseReg; /* For TK_REGISTER when parsing RETURNING */ } uNC; NameContext *pNext; /* Next outer name context. NULL for outermost */ int nRef; /* Number of names resolved by this context */ @@ -18462,6 +18568,7 @@ struct NameContext { #define NC_UEList 0x00080 /* True if uNC.pEList is used */ #define NC_UAggInfo 0x00100 /* True if uNC.pAggInfo is used */ #define NC_UUpsert 0x00200 /* True if uNC.pUpsert is used */ +#define NC_UBaseReg 0x00400 /* True if uNC.iBaseReg is used */ #define NC_MinMaxAgg 0x01000 /* min/max aggregates seen. See note above */ #define NC_Complex 0x02000 /* True if a function or subquery seen */ #define NC_AllowWin 0x04000 /* Window functions are allowed here */ @@ -18485,15 +18592,21 @@ struct NameContext { ** WHERE clause is omitted. */ struct Upsert { - ExprList *pUpsertTarget; /* Optional description of conflicting index */ + ExprList *pUpsertTarget; /* Optional description of conflict target */ Expr *pUpsertTargetWhere; /* WHERE clause for partial index targets */ ExprList *pUpsertSet; /* The SET clause from an ON CONFLICT UPDATE */ Expr *pUpsertWhere; /* WHERE clause for the ON CONFLICT UPDATE */ - /* The fields above comprise the parse tree for the upsert clause. 
- ** The fields below are used to transfer information from the INSERT - ** processing down into the UPDATE processing while generating code. - ** Upsert owns the memory allocated above, but not the memory below. */ - Index *pUpsertIdx; /* Constraint that pUpsertTarget identifies */ + Upsert *pNextUpsert; /* Next ON CONFLICT clause in the list */ + u8 isDoUpdate; /* True for DO UPDATE. False for DO NOTHING */ + /* Above this point is the parse tree for the ON CONFLICT clauses. + ** The next group of fields stores intermediate data. */ + void *pToFree; /* Free memory when deleting the Upsert object */ + /* All fields above are owned by the Upsert object and must be freed + ** when the Upsert is destroyed. The fields below are used to transfer + ** information from the INSERT processing down into the UPDATE processing + ** while generating code. The fields below are owned by the INSERT + ** statement and will be freed by INSERT processing. */ + Index *pUpsertIdx; /* UNIQUE constraint specified by pUpsertTarget */ SrcList *pUpsertSrc; /* Table to be updated */ int regData; /* First register holding array of VALUES */ int iDataCur; /* Index of the data cursor */ @@ -18573,6 +18686,8 @@ struct Select { #define SF_View 0x0200000 /* SELECT statement is a view */ #define SF_NoopOrderBy 0x0400000 /* ORDER BY is ignored for this query */ #define SF_UpdateFrom 0x0800000 /* Statement is an UPDATE...FROM */ +#define SF_PushDown 0x1000000 /* SELECT has been modified by push-down opt */ +#define SF_MultiPart 0x2000000 /* Has multiple incompatible PARTITIONs */ /* ** The results of a SELECT can be distributed in several ways, as defined @@ -18743,6 +18858,17 @@ struct TriggerPrg { # define DbMaskNonZero(M) (M)!=0 #endif +/* +** An instance of the ParseCleanup object specifies an operation that +** should be performed after parsing to deallocate resources obtained +** during the parse that are no longer needed. +*/ +struct ParseCleanup { + ParseCleanup *pNext; /* Next cleanup task */ + void *pPtr; /* Pointer to object to deallocate */ + void (*xCleanup)(sqlite3*,void*); /* Deallocation routine */ +}; + /* ** An SQL parser context.
A copy of this structure is passed through ** the parser and down into all the parser action routines in order to @@ -18774,6 +18900,9 @@ struct Parse { u8 okConstFactor; /* OK to factor out constants */ u8 disableLookaside; /* Number of times lookaside has been disabled */ u8 disableVtab; /* Disable all virtual tables for this parse */ +#if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST) + u8 earlyCleanup; /* OOM inside sqlite3ParserAddCleanup() */ +#endif int nRangeReg; /* Size of the temporary register block */ int iRangeReg; /* First register in temporary register block */ int nErr; /* Number of errors seen */ @@ -18801,12 +18930,15 @@ struct Parse { Parse *pToplevel; /* Parse structure for main program (or NULL) */ Table *pTriggerTab; /* Table triggers are being coded for */ Parse *pParentParse; /* Parent parser if this parser is nested */ - AggInfo *pAggList; /* List of all AggInfo objects */ - int addrCrTab; /* Address of OP_CreateBtree opcode on CREATE TABLE */ + union { + int addrCrTab; /* Address of OP_CreateBtree on CREATE TABLE */ + Returning *pReturning; /* The RETURNING clause */ + } u1; u32 nQueryLoop; /* Est number of iterations of a query (10*log2(N)) */ u32 oldmask; /* Mask of old.* columns referenced */ u32 newmask; /* Mask of new.* columns referenced */ u8 eTriggerOp; /* TK_UPDATE, TK_INSERT or TK_DELETE */ + u8 bReturning; /* Coding a RETURNING trigger */ u8 eOrconf; /* Default ON CONFLICT policy for trigger steps */ u8 disableTriggers; /* True to disable triggers */ @@ -18852,10 +18984,9 @@ struct Parse { Token sArg; /* Complete text of a module argument */ Table **apVtabLock; /* Pointer to virtual tables needing locking */ #endif - Table *pZombieTab; /* List of Table objects to delete after code gen */ TriggerPrg *pTriggerPrg; /* Linked list of coded triggers */ With *pWith; /* Current WITH clause, or NULL */ - With *pWithToFree; /* Free this WITH object at the end of the parse */ + ParseCleanup *pCleanup; /* List of cleanup operations to run after parse */ #ifndef SQLITE_OMIT_ALTERTABLE RenameToken *pRename; /* Tokens subject to renaming by ALTER TABLE */ #endif @@ -18935,6 +19066,7 @@ struct AuthContext { #define OPFLAG_SAVEPOSITION 0x02 /* OP_Delete/Insert: save cursor pos */ #define OPFLAG_AUXDELETE 0x04 /* OP_Delete: index in a DELETE op */ #define OPFLAG_NOCHNG_MAGIC 0x6d /* OP_MakeRecord: serialtype 10 is ok */ +#define OPFLAG_PREFORMAT 0x80 /* OP_Insert uses preformatted cell */ /* * Each trigger present in the database schema is stored as an instance of @@ -18956,6 +19088,7 @@ struct Trigger { char *table; /* The table or view to which the trigger applies */ u8 op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT */ u8 tr_tm; /* One of TRIGGER_BEFORE, TRIGGER_AFTER */ + u8 bReturning; /* This trigger implements a RETURNING clause */ Expr *pWhen; /* The WHEN clause of the expression (may be NULL) */ IdList *pColumns; /* If this is an UPDATE OF <column-list> trigger, the <column-list> is stored here */ @@ -19014,14 +19147,15 @@ struct Trigger { * */ struct TriggerStep { - u8 op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT, TK_SELECT */ + u8 op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT, TK_SELECT, + ** or TK_RETURNING */ u8 orconf; /* OE_Rollback etc. */ Trigger *pTrig; /* The trigger that this step is a part of */ Select *pSelect; /* SELECT statement or RHS of INSERT INTO SELECT ...
*/ char *zTarget; /* Target table for DELETE, UPDATE, INSERT */ SrcList *pFrom; /* FROM clause for UPDATE statement (if any) */ Expr *pWhere; /* The WHERE clause for DELETE or UPDATE steps */ - ExprList *pExprList; /* SET clause for UPDATE */ + ExprList *pExprList; /* SET clause for UPDATE, or RETURNING clause */ IdList *pIdList; /* Column names for INSERT */ Upsert *pUpsert; /* Upsert clauses on an INSERT */ char *zSpan; /* Original SQL text of this command */ @@ -19030,18 +19164,16 @@ struct TriggerStep { }; /* -** The following structure contains information used by the sqliteFix... -** routines as they walk the parse tree to make database references -** explicit. +** Information about a RETURNING clause */ -typedef struct DbFixer DbFixer; -struct DbFixer { - Parse *pParse; /* The parsing context. Error messages written here */ - Schema *pSchema; /* Fix items to this schema */ - u8 bTemp; /* True for TEMP schema entries */ - const char *zDb; /* Make sure all objects are contained in this database */ - const char *zType; /* Type of the container - used for error messages */ - const Token *pName; /* Name of the container - used for error messages */ +struct Returning { + Parse *pParse; /* The parse that includes the RETURNING clause */ + ExprList *pReturnEL; /* List of expressions to return */ + Trigger retTrig; /* The transient trigger that implements RETURNING */ + TriggerStep retTStep; /* The trigger step */ + int iRetCur; /* Transient table holding RETURNING results */ + int nRetCol; /* Number of columns in pReturnEL after expansion */ + int iRetReg; /* Register array for holding a row of RETURNING */ }; /* @@ -19081,7 +19213,8 @@ typedef struct { /* ** Allowed values for mInitFlags */ -#define INITFLAG_AlterTable 0x0001 /* This is a reparse after ALTER TABLE */ +#define INITFLAG_AlterRename 0x0001 /* Reparse after a RENAME */ +#define INITFLAG_AlterDrop 0x0002 /* Reparse after a DROP COLUMN */ /* ** Structure containing global configuration data for the SQLite library. @@ -19193,10 +19326,26 @@ struct Walker { struct WhereConst *pConst; /* WHERE clause constants */ struct RenameCtx *pRename; /* RENAME COLUMN context */ struct Table *pTab; /* Table of generated column */ - struct SrcList_item *pSrcItem; /* A single FROM clause item */ + SrcItem *pSrcItem; /* A single FROM clause item */ + DbFixer *pFix; } u; }; +/* +** The following structure contains information used by the sqliteFix... +** routines as they walk the parse tree to make database references +** explicit. +*/ +struct DbFixer { + Parse *pParse; /* The parsing context. Error messages written here */ + Walker w; /* Walker object */ + Schema *pSchema; /* Fix items to this schema */ + u8 bTemp; /* True for TEMP schema entries */ + const char *zDb; /* Make sure all objects are contained in this database */ + const char *zType; /* Type of the container - used for error messages */ + const Token *pName; /* Name of the container - used for error messages */ +}; + /* Forward declarations */ SQLITE_PRIVATE int sqlite3WalkExpr(Walker*, Expr*); SQLITE_PRIVATE int sqlite3WalkExprList(Walker*, ExprList*); @@ -19222,20 +19371,55 @@ SQLITE_PRIVATE void sqlite3SelectWalkAssert2(Walker*, Select*); #define WRC_Abort 2 /* Abandon the tree walk */ /* -** An instance of this structure represents a set of one or more CTEs -** (common table expressions) created by a single WITH clause.
+** A single common table expression +*/ +struct Cte { + char *zName; /* Name of this CTE */ + ExprList *pCols; /* List of explicit column names, or NULL */ + Select *pSelect; /* The definition of this CTE */ + const char *zCteErr; /* Error message for circular references */ + CteUse *pUse; /* Usage information for this CTE */ + u8 eM10d; /* The MATERIALIZED flag */ +}; + +/* +** Allowed values for the materialized flag (eM10d): +*/ +#define M10d_Yes 0 /* AS MATERIALIZED */ +#define M10d_Any 1 /* Not specified. Query planner's choice */ +#define M10d_No 2 /* AS NOT MATERIALIZED */ + +/* +** An instance of the With object represents a WITH clause containing +** one or more CTEs (common table expressions). */ struct With { - int nCte; /* Number of CTEs in the WITH clause */ - With *pOuter; /* Containing WITH clause, or NULL */ - struct Cte { /* For each CTE in the WITH clause.... */ - char *zName; /* Name of this CTE */ - ExprList *pCols; /* List of explicit column names, or NULL */ - Select *pSelect; /* The definition of this CTE */ - const char *zCteErr; /* Error message for circular references */ - } a[1]; + int nCte; /* Number of CTEs in the WITH clause */ + With *pOuter; /* Containing WITH clause, or NULL */ + Cte a[1]; /* For each CTE in the WITH clause.... */ }; +/* +** The Cte object is not guaranteed to persist for the entire duration +** of code generation. (The query flattener or other parser tree +** edits might delete it.) The following object records information +** about each Common Table Expression that must be preserved for the +** duration of the parse. +** +** The CteUse objects are freed using sqlite3ParserAddCleanup() rather +** than sqlite3SelectDelete(), which is what enables them to persist +** until the end of code generation. 
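+**
+** The eM10d field below records the MATERIALIZED hint that SQLite 3.35
+** accepts in WITH clauses, as in this hedged illustration (table t is
+** hypothetical):
+**
+**     WITH c AS MATERIALIZED (SELECT * FROM t)
+**     SELECT * FROM c CROSS JOIN c AS c2;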
+*/ +struct CteUse { + int nUse; /* Number of users of this CTE */ + int addrM9e; /* Start of subroutine to compute materialization */ + int regRtn; /* Return address register for addrM9e subroutine */ + int iCur; /* Ephemeral table holding the materialization */ + LogEst nRowEst; /* Estimated number of rows in the table */ + u8 eM10d; /* The MATERIALIZED flag */ +}; + + #ifdef SQLITE_DEBUG /* ** An instance of the TreeView object is used for printing the content of @@ -19313,7 +19497,6 @@ SQLITE_PRIVATE int sqlite3WindowCompare(Parse*, Window*, Window*, int); SQLITE_PRIVATE void sqlite3WindowCodeInit(Parse*, Select*); SQLITE_PRIVATE void sqlite3WindowCodeStep(Parse*, Select*, WhereInfo*, int, int); SQLITE_PRIVATE int sqlite3WindowRewrite(Parse*, Select*); -SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse*, struct SrcList_item*); SQLITE_PRIVATE void sqlite3WindowUpdate(Parse*, Window*, Window*, FuncDef*); SQLITE_PRIVATE Window *sqlite3WindowDup(sqlite3 *db, Expr *pOwner, Window *p); SQLITE_PRIVATE Window *sqlite3WindowListDup(sqlite3 *db, Window *p); @@ -19582,6 +19765,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, Token*, int); SQLITE_PRIVATE void sqlite3ExprFunctionUsable(Parse*,Expr*,FuncDef*); SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*, u32); SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*); +SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse*, Expr*); SQLITE_PRIVATE void sqlite3ExprUnmapAndDelete(Parse*, Expr*); SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*); SQLITE_PRIVATE ExprList *sqlite3ExprListAppendVector(Parse*,ExprList*,IdList*,Expr*); @@ -19630,6 +19814,7 @@ SQLITE_PRIVATE void sqlite3AddDefaultValue(Parse*,Expr*,const char*,const char*) SQLITE_PRIVATE void sqlite3AddCollateType(Parse*, Token*); SQLITE_PRIVATE void sqlite3AddGenerated(Parse*,Expr*,Token*); SQLITE_PRIVATE void sqlite3EndTable(Parse*,Token*,Token*,u8,Select*); +SQLITE_PRIVATE void sqlite3AddReturning(Parse*,ExprList*); SQLITE_PRIVATE int sqlite3ParseUri(const char*,const char*,unsigned int*, sqlite3_vfs**,char**,char **); #define sqlite3CodecQueryParameters(A,B,C) 0 @@ -19695,7 +19880,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm(Parse*, SrcList*, Token*, T Token*, Select*, Expr*, IdList*); SQLITE_PRIVATE void sqlite3SrcListIndexedBy(Parse *, SrcList *, Token *); SQLITE_PRIVATE void sqlite3SrcListFuncArgs(Parse*, SrcList*, ExprList*); -SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *, struct SrcList_item *); +SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *, SrcItem *); SQLITE_PRIVATE void sqlite3SrcListShiftJoinType(SrcList*); SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse*, SrcList*); SQLITE_PRIVATE void sqlite3IdListDelete(sqlite3*, IdList*); @@ -19723,6 +19908,7 @@ SQLITE_PRIVATE LogEst sqlite3WhereOutputRowCount(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereIsDistinct(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereOrderByLimitOptLabel(WhereInfo*); +SQLITE_PRIVATE void sqlite3WhereMinMaxOptEarlyOut(Vdbe*,WhereInfo*); SQLITE_PRIVATE int sqlite3WhereIsSorted(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereContinueLabel(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereBreakLabel(WhereInfo*); @@ -19756,7 +19942,7 @@ SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3*,const char*, const char*); #define LOCATE_VIEW 0x01 #define LOCATE_NOERR 0x02 SQLITE_PRIVATE Table *sqlite3LocateTable(Parse*,u32 flags,const char*, const char*); -SQLITE_PRIVATE Table *sqlite3LocateTableItem(Parse*,u32 
flags,struct SrcList_item *); +SQLITE_PRIVATE Table *sqlite3LocateTableItem(Parse*,u32 flags,SrcItem *); SQLITE_PRIVATE Index *sqlite3FindIndex(sqlite3*,const char*, const char*); SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTable(sqlite3*,int,const char*); SQLITE_PRIVATE void sqlite3UnlinkAndDeleteIndex(sqlite3*,int,const char*); @@ -19884,6 +20070,7 @@ SQLITE_PRIVATE SrcList *sqlite3TriggerStepSrc(Parse*, TriggerStep*); #endif SQLITE_PRIVATE int sqlite3JoinType(Parse*, Token*, Token*, Token*); +SQLITE_PRIVATE int sqlite3ColumnIndex(Table *pTab, const char *zCol); SQLITE_PRIVATE void sqlite3SetJoinExpr(Expr*,int); SQLITE_PRIVATE void sqlite3CreateForeignKey(Parse*, ExprList*, Token*, ExprList*, int); SQLITE_PRIVATE void sqlite3DeferForeignKey(Parse*, int); @@ -19906,7 +20093,6 @@ SQLITE_PRIVATE void sqlite3FixInit(DbFixer*, Parse*, int, const char*, const Tok SQLITE_PRIVATE int sqlite3FixSrcList(DbFixer*, SrcList*); SQLITE_PRIVATE int sqlite3FixSelect(DbFixer*, Select*); SQLITE_PRIVATE int sqlite3FixExpr(DbFixer*, Expr*); -SQLITE_PRIVATE int sqlite3FixExprList(DbFixer*, ExprList*); SQLITE_PRIVATE int sqlite3FixTriggerStep(DbFixer*, TriggerStep*); SQLITE_PRIVATE int sqlite3RealSameAsInt(double,sqlite3_int64); SQLITE_PRIVATE void sqlite3Int64ToText(i64,char*); @@ -19969,6 +20155,7 @@ SQLITE_PRIVATE int sqlite3Atoi64(const char*, i64*, int, u8); SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char*, i64*); SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3*, int, const char*,...); SQLITE_PRIVATE void sqlite3Error(sqlite3*,int); +SQLITE_PRIVATE void sqlite3ErrorClear(sqlite3*); SQLITE_PRIVATE void sqlite3SystemError(sqlite3*,int); SQLITE_PRIVATE void *sqlite3HexToBlob(sqlite3*, const char *z, int n); SQLITE_PRIVATE u8 sqlite3HexToInt(int h); @@ -20032,7 +20219,6 @@ SQLITE_PRIVATE const unsigned char sqlite3UpperToLower[]; SQLITE_PRIVATE const unsigned char sqlite3CtypeMap[]; SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config; SQLITE_PRIVATE FuncDefHash sqlite3BuiltinFunctions; -SQLITE_API extern u32 sqlite3_unsupported_selecttrace; #ifndef SQLITE_OMIT_WSD SQLITE_PRIVATE int sqlite3PendingByte; #endif @@ -20051,6 +20237,7 @@ SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3*, int); SQLITE_PRIVATE void sqlite3CodeRhsOfIN(Parse*, Expr*, int); SQLITE_PRIVATE int sqlite3CodeSubselect(Parse*, Expr*); SQLITE_PRIVATE void sqlite3SelectPrep(Parse*, Select*, NameContext*); +SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse*, SrcItem*); SQLITE_PRIVATE void sqlite3SelectWrongNumTermsError(Parse *pParse, Select *p); SQLITE_PRIVATE int sqlite3MatchEName( const struct ExprList_item*, @@ -20068,6 +20255,7 @@ SQLITE_PRIVATE int sqlite3ResolveOrderGroupBy(Parse*, Select*, ExprList*, const SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *, Table *, int, int); SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *, Token *); SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *, SrcList *); +SQLITE_PRIVATE void sqlite3AlterDropColumn(Parse*, SrcList*, Token*); SQLITE_PRIVATE void *sqlite3RenameTokenMap(Parse*, void*, Token*); SQLITE_PRIVATE void sqlite3RenameTokenRemap(Parse*, void *pTo, void *pFrom); SQLITE_PRIVATE void sqlite3RenameExprUnmap(Parse*, Expr*); @@ -20091,6 +20279,7 @@ SQLITE_PRIVATE void sqlite3KeyInfoUnref(KeyInfo*); SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoRef(KeyInfo*); SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoOfIndex(Parse*, Index*); SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoFromExprList(Parse*, ExprList*, int, int); +SQLITE_PRIVATE const char *sqlite3SelectOpName(int); SQLITE_PRIVATE int 
sqlite3HasExplicitNulls(Parse*, ExprList*); #ifdef SQLITE_DEBUG @@ -20221,6 +20410,7 @@ SQLITE_PRIVATE sqlite3_int64 sqlite3StmtCurrentTime(sqlite3_context*); SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe*, const char*, int); SQLITE_PRIVATE int sqlite3TransferBindings(sqlite3_stmt *, sqlite3_stmt *); SQLITE_PRIVATE void sqlite3ParserReset(Parse*); +SQLITE_PRIVATE void *sqlite3ParserAddCleanup(Parse*,void(*)(sqlite3*,void*),void*); #ifdef SQLITE_ENABLE_NORMALIZE SQLITE_PRIVATE char *sqlite3Normalize(Vdbe*, const char*); #endif @@ -20235,23 +20425,32 @@ SQLITE_PRIVATE int sqlite3Checkpoint(sqlite3*, int, int, int*, int*); SQLITE_PRIVATE int sqlite3WalDefaultHook(void*,sqlite3*,const char*,int); #endif #ifndef SQLITE_OMIT_CTE -SQLITE_PRIVATE With *sqlite3WithAdd(Parse*,With*,Token*,ExprList*,Select*); +SQLITE_PRIVATE Cte *sqlite3CteNew(Parse*,Token*,ExprList*,Select*,u8); +SQLITE_PRIVATE void sqlite3CteDelete(sqlite3*,Cte*); +SQLITE_PRIVATE With *sqlite3WithAdd(Parse*,With*,Cte*); SQLITE_PRIVATE void sqlite3WithDelete(sqlite3*,With*); SQLITE_PRIVATE void sqlite3WithPush(Parse*, With*, u8); #else -#define sqlite3WithPush(x,y,z) -#define sqlite3WithDelete(x,y) +# define sqlite3CteNew(P,T,E,S) ((void*)0) +# define sqlite3CteDelete(D,C) +# define sqlite3CteWithAdd(P,W,C) ((void*)0) +# define sqlite3WithDelete(x,y) +# define sqlite3WithPush(x,y,z) #endif #ifndef SQLITE_OMIT_UPSERT -SQLITE_PRIVATE Upsert *sqlite3UpsertNew(sqlite3*,ExprList*,Expr*,ExprList*,Expr*); +SQLITE_PRIVATE Upsert *sqlite3UpsertNew(sqlite3*,ExprList*,Expr*,ExprList*,Expr*,Upsert*); SQLITE_PRIVATE void sqlite3UpsertDelete(sqlite3*,Upsert*); SQLITE_PRIVATE Upsert *sqlite3UpsertDup(sqlite3*,Upsert*); SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget(Parse*,SrcList*,Upsert*); SQLITE_PRIVATE void sqlite3UpsertDoUpdate(Parse*,Upsert*,Table*,Index*,int); +SQLITE_PRIVATE Upsert *sqlite3UpsertOfIndex(Upsert*,Index*); +SQLITE_PRIVATE int sqlite3UpsertNextIsIPK(Upsert*); #else -#define sqlite3UpsertNew(v,w,x,y,z) ((Upsert*)0) +#define sqlite3UpsertNew(u,v,w,x,y,z) ((Upsert*)0) #define sqlite3UpsertDelete(x,y) -#define sqlite3UpsertDup(x,y) ((Upsert*)0) +#define sqlite3UpsertDup(x,y) ((Upsert*)0) +#define sqlite3UpsertOfIndex(x,y) ((Upsert*)0) +#define sqlite3UpsertNextIsIPK(x) 0 #endif @@ -20747,9 +20946,10 @@ SQLITE_PRIVATE int sqlite3PendingByte = 0x40000000; #endif /* -** Flags for select tracing and the ".selecttrace" macro of the CLI +** Tracing flags set by SQLITE_TESTCTRL_TRACEFLAGS. 
*/ -SQLITE_API u32 sqlite3_unsupported_selecttrace = 0; +SQLITE_PRIVATE u32 sqlite3SelectTrace = 0; +SQLITE_PRIVATE u32 sqlite3WhereTrace = 0; /* #include "opcodes.h" */ /* @@ -20873,6 +21073,7 @@ struct VdbeCursor { Bool isEphemeral:1; /* True for an ephemeral table */ Bool useRandomRowid:1; /* Generate new record numbers semi-randomly */ Bool isOrdered:1; /* True if the table is not BTREE_UNORDERED */ + Bool hasBeenDuped:1; /* This cursor was source or target of OP_OpenDup */ u16 seekHit; /* See the OP_SeekHit and OP_IfNoHope opcodes */ Btree *pBtx; /* Separate file holding temporary table */ i64 seqCount; /* Sequence counter */ @@ -21168,7 +21369,7 @@ struct Vdbe { Vdbe *pPrev,*pNext; /* Linked list of VDBEs with the same Vdbe.db */ Parse *pParse; /* Parsing context used to create this Vdbe */ ynVar nVar; /* Number of entries in aVar[] */ - u32 magic; /* Magic number for sanity checking */ + u32 iVdbeMagic; /* Magic number defining state of the SQL statement */ int nMem; /* Number of memory locations currently allocated */ int nCursor; /* Number of slots in apCsr[] */ u32 cacheCtr; /* VdbeCursor row cache generation counter */ @@ -22673,6 +22874,7 @@ static int isDate( int eType; memset(p, 0, sizeof(*p)); if( argc==0 ){ + if( !sqlite3NotPureFunc(context) ) return 1; return setDateTimeToCurrent(context, p); } if( (eType = sqlite3_value_type(argv[0]))==SQLITE_FLOAT @@ -23173,6 +23375,8 @@ SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file *id, int op, void *pArg){ #ifdef SQLITE_TEST if( op!=SQLITE_FCNTL_COMMIT_PHASETWO && op!=SQLITE_FCNTL_LOCK_TIMEOUT + && op!=SQLITE_FCNTL_CKPT_DONE + && op!=SQLITE_FCNTL_CKPT_START ){ /* Faults are not injected into COMMIT_PHASETWO because, assuming SQLite ** is using a regular VFS, it is called after the corresponding @@ -23183,7 +23387,12 @@ SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file *id, int op, void *pArg){ ** The core must call OsFileControl() though, not OsFileControlHint(), ** as if a custom VFS (e.g. zipvfs) returns an error here, it probably ** means the commit really has failed and an error should be returned - ** to the user. */ + ** to the user. + ** + ** The CKPT_DONE and CKPT_START file-controls are write-only signals + ** to the cksumvfs. Their return code is meaningless and is ignored + ** by the SQLite core, so there is no point in simulating OOMs for them. + */ DO_OS_MALLOC_TEST(id); } #endif @@ -29082,7 +29291,7 @@ SQLITE_API void sqlite3_str_vappendf( case etSRCLIST: { SrcList *pSrc; int k; - struct SrcList_item *pItem; + SrcItem *pItem; if( (pAccum->printfFlags & SQLITE_PRINTF_INTERNAL)==0 ) return; pSrc = va_arg(ap, SrcList*); k = va_arg(ap, int); @@ -29147,7 +29356,7 @@ static int sqlite3StrAccumEnlarge(StrAccum *p, int N){ }else{ char *zOld = isMalloced(p) ? 
p->zText : 0; i64 szNew = p->nChar; - szNew += N + 1; + szNew += (sqlite3_int64)N + 1; if( szNew+p->nChar<=p->mxAlloc ){ /* Force exponential buffer size growth as long as it does not overflow, ** to avoid having to call this routine too often */ @@ -29650,7 +29859,10 @@ SQLITE_PRIVATE void sqlite3TreeViewWith(TreeView *pView, const With *pWith, u8 m } sqlite3_str_appendf(&x, ")"); } - sqlite3_str_appendf(&x, " AS"); + if( pCte->pUse ){ + sqlite3_str_appendf(&x, " (pUse=0x%p, nUse=%d)", pCte->pUse, + pCte->pUse->nUse); + } sqlite3StrAccumFinish(&x); sqlite3TreeViewItem(pView, zLine, i<pWith->nCte-1); sqlite3TreeViewSelect(pView, pCte->pSelect, 0); @@ -29666,7 +29878,7 @@ SQLITE_PRIVATE void sqlite3TreeViewWith(TreeView *pView, const With *pWith, u8 m SQLITE_PRIVATE void sqlite3TreeViewSrcList(TreeView *pView, const SrcList *pSrc){ int i; for(i=0; i<pSrc->nSrc; i++){ - const struct SrcList_item *pItem = &pSrc->a[i]; + const SrcItem *pItem = &pSrc->a[i]; StrAccum x; char zLine[100]; sqlite3StrAccumInit(&x, 0, zLine, sizeof(zLine), 0); @@ -29689,6 +29901,9 @@ SQLITE_PRIVATE void sqlite3TreeViewSrcList(TreeView *pView, const SrcList *pSrc) if( pItem->fg.fromDDL ){ sqlite3_str_appendf(&x, " DDL"); } + if( pItem->fg.isCte ){ + sqlite3_str_appendf(&x, " CteUse=0x%p", pItem->u2.pCteUse); + } sqlite3StrAccumFinish(&x); sqlite3TreeViewItem(pView, zLine, i<pSrc->nSrc-1); if( pItem->pSelect ){ @@ -31385,6 +31600,16 @@ SQLITE_PRIVATE void sqlite3Error(sqlite3 *db, int err_code){ if( err_code || db->pErr ) sqlite3ErrorFinish(db, err_code); } +/* +** The equivalent of sqlite3Error(db, SQLITE_OK). Clear the error state +** and error message. +*/ +SQLITE_PRIVATE void sqlite3ErrorClear(sqlite3 *db){ + assert( db!=0 ); + db->errCode = SQLITE_OK; + if( db->pErr ) sqlite3ValueSetNull(db->pErr); +} + /* ** Load the sqlite3.iSysErrno field if that is an appropriate thing ** to do based on the SQLite error code in rc. 
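Editor's note, not part of the upstream diff: the sqlite3StrAccumEnlarge() hunk above widens the addition to 64 bits before the growth check. A minimal sketch of the hazard it closes, assuming a 32-bit int; the helper below is illustrative, not a SQLite API:

    #include <stdint.h>
    /* "N + 1" evaluated in int overflows when N==INT_MAX, which is
    ** undefined behavior, and it happens before the assignment to the
    ** 64-bit accumulator can help.  Casting one operand first, exactly
    ** as the patched line does, keeps the whole sum in 64 bits. */
    static int64_t grown_size(int64_t nChar, int N){
      return nChar + (int64_t)N + 1;
    }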
@@ -33331,103 +33556,105 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* 77 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"), /* 78 */ "SCopy" OpHelp("r[P2]=r[P1]"), /* 79 */ "IntCopy" OpHelp("r[P2]=r[P1]"), - /* 80 */ "ResultRow" OpHelp("output=r[P1@P2]"), - /* 81 */ "CollSeq" OpHelp(""), - /* 82 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"), - /* 83 */ "RealAffinity" OpHelp(""), - /* 84 */ "Cast" OpHelp("affinity(r[P1])"), - /* 85 */ "Permutation" OpHelp(""), - /* 86 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"), - /* 87 */ "IsTrue" OpHelp("r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4"), - /* 88 */ "Offset" OpHelp("r[P3] = sqlite_offset(P1)"), - /* 89 */ "Column" OpHelp("r[P3]=PX"), - /* 90 */ "Affinity" OpHelp("affinity(r[P1@P2])"), - /* 91 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"), - /* 92 */ "Count" OpHelp("r[P2]=count()"), - /* 93 */ "ReadCookie" OpHelp(""), - /* 94 */ "SetCookie" OpHelp(""), - /* 95 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"), - /* 96 */ "OpenRead" OpHelp("root=P2 iDb=P3"), - /* 97 */ "OpenWrite" OpHelp("root=P2 iDb=P3"), - /* 98 */ "OpenDup" OpHelp(""), - /* 99 */ "OpenAutoindex" OpHelp("nColumn=P2"), - /* 100 */ "OpenEphemeral" OpHelp("nColumn=P2"), - /* 101 */ "BitAnd" OpHelp("r[P3]=r[P1]&r[P2]"), - /* 102 */ "BitOr" OpHelp("r[P3]=r[P1]|r[P2]"), - /* 103 */ "ShiftLeft" OpHelp("r[P3]=r[P2]<<r[P1]"), - /* 104 */ "ShiftRight" OpHelp("r[P3]=r[P2]>>r[P1]"), - /* 105 */ "Add" OpHelp("r[P3]=r[P1]+r[P2]"), - /* 106 */ "Subtract" OpHelp("r[P3]=r[P2]-r[P1]"), - /* 107 */ "Multiply" OpHelp("r[P3]=r[P1]*r[P2]"), - /* 108 */ "Divide" OpHelp("r[P3]=r[P2]/r[P1]"), - /* 109 */ "Remainder" OpHelp("r[P3]=r[P2]%r[P1]"), - /* 110 */ "Concat" OpHelp("r[P3]=r[P2]+r[P1]"), - /* 111 */ "SorterOpen" OpHelp(""), - /* 112 */ "BitNot" OpHelp("r[P2]= ~r[P1]"), - /* 113 */ "SequenceTest" OpHelp("if( cursor[P1].ctr++ ) pc = P2"), - /* 114 */ "OpenPseudo" OpHelp("P3 columns in r[P2]"), - /* 115 */ "String8" OpHelp("r[P2]='P4'"), - /* 116 */ "Close" OpHelp(""), - /* 117 */ "ColumnsUsed" OpHelp(""), - /* 118 */ "SeekScan" OpHelp("Scan-ahead up to P1 rows"), - /* 119 */ "SeekHit" OpHelp("set P2<=seekHit<=P3"), - /* 120 */ "Sequence" OpHelp("r[P2]=cursor[P1].ctr++"), - /* 121 */ "NewRowid" OpHelp("r[P2]=rowid"), - /* 122 */ "Insert" OpHelp("intkey=r[P3] data=r[P2]"), - /* 123 */ "Delete" OpHelp(""), - /* 124 */ "ResetCount" OpHelp(""), - /* 125 */ "SorterCompare" OpHelp("if key(P1)!=trim(r[P3],P4) goto P2"), - /* 126 */ "SorterData" OpHelp("r[P2]=data"), - /* 127 */ "RowData" OpHelp("r[P2]=data"), - /* 128 */ "Rowid" OpHelp("r[P2]=rowid"), - /* 129 */ "NullRow" OpHelp(""), - /* 130 */ "SeekEnd" OpHelp(""), - /* 131 */ "IdxInsert" OpHelp("key=r[P2]"), - /* 132 */ "SorterInsert" OpHelp("key=r[P2]"), - /* 133 */ "IdxDelete" OpHelp("key=r[P2@P3]"), - /* 134 */ "DeferredSeek" OpHelp("Move P3 to P1.rowid if needed"), - /* 135 */ "IdxRowid" OpHelp("r[P2]=rowid"), - /* 136 */ "FinishSeek" OpHelp(""), - /* 137 */ "Destroy" OpHelp(""), - /* 138 */ "Clear" OpHelp(""), - /* 139 */ "ResetSorter" OpHelp(""), - /* 140 */ "CreateBtree" OpHelp("r[P2]=root iDb=P1 flags=P3"), - /* 141 */ "SqlExec" OpHelp(""), - /* 142 */ "ParseSchema" OpHelp(""), - /* 143 */ "LoadAnalysis" OpHelp(""), - /* 144 */ "DropTable" OpHelp(""), - /* 145 */ "DropIndex" OpHelp(""), - /* 146 */ "DropTrigger" OpHelp(""), - /* 147 */ "IntegrityCk" OpHelp(""), - /* 148 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"), - /* 149 */ "Param" OpHelp(""), - /* 150 */ "Real" OpHelp("r[P2]=P4"), - /* 151 */ "FkCounter" OpHelp("fkctr[P1]+=P2"), - /* 152 */ "MemMax" 
OpHelp("r[P1]=max(r[P1],r[P2])"), - /* 153 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"), - /* 154 */ "AggInverse" OpHelp("accum=r[P3] inverse(r[P2@P5])"), - /* 155 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"), - /* 156 */ "AggStep1" OpHelp("accum=r[P3] step(r[P2@P5])"), - /* 157 */ "AggValue" OpHelp("r[P3]=value N=P2"), - /* 158 */ "AggFinal" OpHelp("accum=r[P1] N=P2"), - /* 159 */ "Expire" OpHelp(""), - /* 160 */ "CursorLock" OpHelp(""), - /* 161 */ "CursorUnlock" OpHelp(""), - /* 162 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"), - /* 163 */ "VBegin" OpHelp(""), - /* 164 */ "VCreate" OpHelp(""), - /* 165 */ "VDestroy" OpHelp(""), - /* 166 */ "VOpen" OpHelp(""), - /* 167 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), - /* 168 */ "VRename" OpHelp(""), - /* 169 */ "Pagecount" OpHelp(""), - /* 170 */ "MaxPgcnt" OpHelp(""), - /* 171 */ "Trace" OpHelp(""), - /* 172 */ "CursorHint" OpHelp(""), - /* 173 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"), - /* 174 */ "Noop" OpHelp(""), - /* 175 */ "Explain" OpHelp(""), - /* 176 */ "Abortable" OpHelp(""), + /* 80 */ "ChngCntRow" OpHelp("output=r[P1]"), + /* 81 */ "ResultRow" OpHelp("output=r[P1@P2]"), + /* 82 */ "CollSeq" OpHelp(""), + /* 83 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"), + /* 84 */ "RealAffinity" OpHelp(""), + /* 85 */ "Cast" OpHelp("affinity(r[P1])"), + /* 86 */ "Permutation" OpHelp(""), + /* 87 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"), + /* 88 */ "IsTrue" OpHelp("r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4"), + /* 89 */ "Offset" OpHelp("r[P3] = sqlite_offset(P1)"), + /* 90 */ "Column" OpHelp("r[P3]=PX"), + /* 91 */ "Affinity" OpHelp("affinity(r[P1@P2])"), + /* 92 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"), + /* 93 */ "Count" OpHelp("r[P2]=count()"), + /* 94 */ "ReadCookie" OpHelp(""), + /* 95 */ "SetCookie" OpHelp(""), + /* 96 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"), + /* 97 */ "OpenRead" OpHelp("root=P2 iDb=P3"), + /* 98 */ "OpenWrite" OpHelp("root=P2 iDb=P3"), + /* 99 */ "OpenDup" OpHelp(""), + /* 100 */ "OpenAutoindex" OpHelp("nColumn=P2"), + /* 101 */ "OpenEphemeral" OpHelp("nColumn=P2"), + /* 102 */ "BitAnd" OpHelp("r[P3]=r[P1]&r[P2]"), + /* 103 */ "BitOr" OpHelp("r[P3]=r[P1]|r[P2]"), + /* 104 */ "ShiftLeft" OpHelp("r[P3]=r[P2]<<r[P1]"), + /* 105 */ "ShiftRight" OpHelp("r[P3]=r[P2]>>r[P1]"), + /* 106 */ "Add" OpHelp("r[P3]=r[P1]+r[P2]"), + /* 107 */ "Subtract" OpHelp("r[P3]=r[P2]-r[P1]"), + /* 108 */ "Multiply" OpHelp("r[P3]=r[P1]*r[P2]"), + /* 109 */ "Divide" OpHelp("r[P3]=r[P2]/r[P1]"), + /* 110 */ "Remainder" OpHelp("r[P3]=r[P2]%r[P1]"), + /* 111 */ "Concat" OpHelp("r[P3]=r[P2]+r[P1]"), + /* 112 */ "SorterOpen" OpHelp(""), + /* 113 */ "BitNot" OpHelp("r[P2]= ~r[P1]"), + /* 114 */ "SequenceTest" OpHelp("if( cursor[P1].ctr++ ) pc = P2"), + /* 115 */ "OpenPseudo" OpHelp("P3 columns in r[P2]"), + /* 116 */ "String8" OpHelp("r[P2]='P4'"), + /* 117 */ "Close" OpHelp(""), + /* 118 */ "ColumnsUsed" OpHelp(""), + /* 119 */ "SeekScan" OpHelp("Scan-ahead up to P1 rows"), + /* 120 */ "SeekHit" OpHelp("set P2<=seekHit<=P3"), + /* 121 */ "Sequence" OpHelp("r[P2]=cursor[P1].ctr++"), + /* 122 */ "NewRowid" OpHelp("r[P2]=rowid"), + /* 123 */ "Insert" OpHelp("intkey=r[P3] data=r[P2]"), + /* 124 */ "RowCell" OpHelp(""), + /* 125 */ "Delete" OpHelp(""), + /* 126 */ "ResetCount" OpHelp(""), + /* 127 */ "SorterCompare" OpHelp("if key(P1)!=trim(r[P3],P4) goto P2"), + /* 128 */ "SorterData" OpHelp("r[P2]=data"), + /* 129 */ "RowData" OpHelp("r[P2]=data"), + /* 130 */ "Rowid" OpHelp("r[P2]=rowid"), + /* 131 */ 
"NullRow" OpHelp(""), + /* 132 */ "SeekEnd" OpHelp(""), + /* 133 */ "IdxInsert" OpHelp("key=r[P2]"), + /* 134 */ "SorterInsert" OpHelp("key=r[P2]"), + /* 135 */ "IdxDelete" OpHelp("key=r[P2@P3]"), + /* 136 */ "DeferredSeek" OpHelp("Move P3 to P1.rowid if needed"), + /* 137 */ "IdxRowid" OpHelp("r[P2]=rowid"), + /* 138 */ "FinishSeek" OpHelp(""), + /* 139 */ "Destroy" OpHelp(""), + /* 140 */ "Clear" OpHelp(""), + /* 141 */ "ResetSorter" OpHelp(""), + /* 142 */ "CreateBtree" OpHelp("r[P2]=root iDb=P1 flags=P3"), + /* 143 */ "SqlExec" OpHelp(""), + /* 144 */ "ParseSchema" OpHelp(""), + /* 145 */ "LoadAnalysis" OpHelp(""), + /* 146 */ "DropTable" OpHelp(""), + /* 147 */ "DropIndex" OpHelp(""), + /* 148 */ "DropTrigger" OpHelp(""), + /* 149 */ "IntegrityCk" OpHelp(""), + /* 150 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"), + /* 151 */ "Param" OpHelp(""), + /* 152 */ "Real" OpHelp("r[P2]=P4"), + /* 153 */ "FkCounter" OpHelp("fkctr[P1]+=P2"), + /* 154 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"), + /* 155 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"), + /* 156 */ "AggInverse" OpHelp("accum=r[P3] inverse(r[P2@P5])"), + /* 157 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"), + /* 158 */ "AggStep1" OpHelp("accum=r[P3] step(r[P2@P5])"), + /* 159 */ "AggValue" OpHelp("r[P3]=value N=P2"), + /* 160 */ "AggFinal" OpHelp("accum=r[P1] N=P2"), + /* 161 */ "Expire" OpHelp(""), + /* 162 */ "CursorLock" OpHelp(""), + /* 163 */ "CursorUnlock" OpHelp(""), + /* 164 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"), + /* 165 */ "VBegin" OpHelp(""), + /* 166 */ "VCreate" OpHelp(""), + /* 167 */ "VDestroy" OpHelp(""), + /* 168 */ "VOpen" OpHelp(""), + /* 169 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), + /* 170 */ "VRename" OpHelp(""), + /* 171 */ "Pagecount" OpHelp(""), + /* 172 */ "MaxPgcnt" OpHelp(""), + /* 173 */ "Trace" OpHelp(""), + /* 174 */ "CursorHint" OpHelp(""), + /* 175 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"), + /* 176 */ "Noop" OpHelp(""), + /* 177 */ "Explain" OpHelp(""), + /* 178 */ "Abortable" OpHelp(""), }; return azName[i]; } @@ -39996,7 +40223,8 @@ static int unixBackupDir(const char *z, int *pJ){ int j = *pJ; int i; if( j<=0 ) return 0; - for(i=j-1; ALWAYS(i>0) && z[i-1]!='/'; i--){} + for(i=j-1; i>0 && z[i-1]!='/'; i--){} + if( i==0 ) return 0; if( z[i]=='.' && i==j-2 && z[i+1]=='.' 
) return 0; *pJ = i-1; return 1; @@ -50436,6 +50664,7 @@ static PgHdr1 *pcache1AllocPage(PCache1 *pCache, int benignMalloc){ p->page.pExtra = &p[1]; p->isBulkLocal = 0; p->isAnchor = 0; + p->pLruPrev = 0; /* Initializing this saves a valgrind error */ } (*pCache->pnPurgeable)++; return p; @@ -52354,6 +52583,7 @@ struct PagerSavepoint { Bitvec *pInSavepoint; /* Set of pages in this savepoint */ Pgno nOrig; /* Original number of pages in file */ Pgno iSubRec; /* Index of first record in sub-journal */ + int bTruncateOnRelease; /* If stmt journal may be truncated on RELEASE */ #ifndef SQLITE_OMIT_WAL u32 aWalData[WAL_SAVEPOINT_NDATA]; /* WAL savepoint context */ #endif @@ -52989,6 +53219,9 @@ static int subjRequiresPage(PgHdr *pPg){ for(i=0; i<pPager->nSavepoint; i++){ p = &pPager->aSavepoint[i]; if( p->nOrig>=pgno && 0==sqlite3BitvecTestNotNull(p->pInSavepoint, pgno) ){ + for(i=i+1; i<pPager->nSavepoint; i++){ + pPager->aSavepoint[i].bTruncateOnRelease = 0; + } return 1; } } @@ -58767,6 +59000,7 @@ static SQLITE_NOINLINE int pagerOpenSavepoint(Pager *pPager, int nSavepoint){ } aNew[ii].iSubRec = pPager->nSubRec; aNew[ii].pInSavepoint = sqlite3BitvecCreate(pPager->dbSize); + aNew[ii].bTruncateOnRelease = 1; if( !aNew[ii].pInSavepoint ){ return SQLITE_NOMEM_BKPT; } @@ -58848,13 +59082,15 @@ SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint){ /* If this is a release of the outermost savepoint, truncate ** the sub-journal to zero bytes in size. */ if( op==SAVEPOINT_RELEASE ){ - if( nNew==0 && isOpen(pPager->sjfd) ){ + PagerSavepoint *pRel = &pPager->aSavepoint[nNew]; + if( pRel->bTruncateOnRelease && isOpen(pPager->sjfd) ){ /* Only truncate if it is an in-memory sub-journal. */ if( sqlite3JournalIsInMemory(pPager->sjfd) ){ - rc = sqlite3OsTruncate(pPager->sjfd, 0); + i64 sz = (pPager->pageSize+4)*pRel->iSubRec; + rc = sqlite3OsTruncate(pPager->sjfd, sz); assert( rc==SQLITE_OK ); } - pPager->nSubRec = 0; + pPager->nSubRec = pRel->iSubRec; } } /* Else this is a rollback operation, playback the specified savepoint. @@ -64045,7 +64281,7 @@ struct Btree { u8 hasIncrblobCur; /* True if there are one or more Incrblob cursors */ int wantToLock; /* Number of nested calls to sqlite3BtreeEnter() */ int nBackup; /* Number of backup operations reading this btree */ - u32 iDataVersion; /* Combines with pBt->pPager->iDataVersion */ + u32 iBDataVersion; /* Combines with pBt->pPager->iDataVersion */ Btree *pNext; /* List of other sharable Btrees from the same db */ Btree *pPrev; /* Back pointer of the same list */ #ifdef SQLITE_DEBUG @@ -64150,6 +64386,7 @@ struct BtShared { Btree *pWriter; /* Btree with currently open write transaction */ #endif u8 *pTmpSpace; /* Temp space sufficient to hold a single cell */ + int nPreformatSize; /* Size of last cell written by TransferRow() */ }; /* @@ -65863,6 +66100,24 @@ static SQLITE_NOINLINE void btreeParseCellAdjustSizeForOverflow( pInfo->nSize = (u16)(&pInfo->pPayload[pInfo->nLocal] - pCell) + 4; } +/* +** Given a record with nPayload bytes of payload stored within btree +** page pPage, return the number of bytes of payload stored locally. 
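+**
+** A worked illustration (editor's addition, with hypothetical figures
+** rather than values taken from this diff): on a page with
+** usableSize==1024, minLocal==23 and maxLocal==489, a payload of
+** nPayload==2000 yields surplus = 23 + (2000-23)%1020 = 980.  That
+** exceeds maxLocal, so only the minLocal 23 bytes stay on the page
+** and the remainder spills to overflow pages.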
+*/ +static int btreePayloadToLocal(MemPage *pPage, i64 nPayload){ + int maxLocal; /* Maximum amount of payload held locally */ + maxLocal = pPage->maxLocal; + if( nPayload<=maxLocal ){ + return nPayload; + }else{ + int minLocal; /* Minimum amount of payload held locally */ + int surplus; /* Overflow payload available for local storage */ + minLocal = pPage->minLocal; + surplus = minLocal + (nPayload - minLocal)%(pPage->pBt->usableSize-4); + return ( surplus <= maxLocal ) ? surplus : minLocal; + } +} + /* ** The following routines are implementations of the MemPage.xParseCell() ** method. @@ -67439,19 +67694,23 @@ static void freeTempSpace(BtShared *pBt){ */ SQLITE_PRIVATE int sqlite3BtreeClose(Btree *p){ BtShared *pBt = p->pBt; - BtCursor *pCur; /* Close all cursors opened via this handle. */ assert( sqlite3_mutex_held(p->db->mutex) ); sqlite3BtreeEnter(p); - pCur = pBt->pCursor; - while( pCur ){ - BtCursor *pTmp = pCur; - pCur = pCur->pNext; - if( pTmp->pBtree==p ){ - sqlite3BtreeCloseCursor(pTmp); + + /* Verify that no other cursors have this Btree open */ +#ifdef SQLITE_DEBUG + { + BtCursor *pCur = pBt->pCursor; + while( pCur ){ + BtCursor *pTmp = pCur; + pCur = pCur->pNext; + assert( pTmp->pBtree!=p ); + } } +#endif /* Rollback any active transaction and free the handle structure. ** The call to sqlite3BtreeRollback() drops any table-locks held by @@ -67603,6 +67862,7 @@ SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int pageSize, int nReserve, ((pageSize-1)&pageSize)==0 ){ assert( (pageSize & 7)==0 ); assert( !pBt->pCursor ); + if( nReserve>32 && pageSize==512 ) pageSize = 1024; pBt->pageSize = (u32)pageSize; freeTempSpace(pBt); } @@ -68832,7 +69092,7 @@ SQLITE_PRIVATE int sqlite3BtreeCommitPhaseTwo(Btree *p, int bCleanup){ sqlite3BtreeLeave(p); return rc; } - p->iDataVersion--; /* Compensate for pPager->iDataVersion++; */ + p->iBDataVersion--; /* Compensate for pPager->iDataVersion++; */ pBt->inTransaction = TRANS_READ; btreeClearHasContent(pBt); } @@ -69242,7 +69502,14 @@ SQLITE_PRIVATE int sqlite3BtreeCloseCursor(BtCursor *pCur){ unlockBtreeIfUnused(pBt); sqlite3_free(pCur->aOverflow); sqlite3_free(pCur->pKey); - sqlite3BtreeLeave(pBtree); + if( (pBt->openFlags & BTREE_SINGLE) && pBt->pCursor==0 ){ + /* Since the BtShared is not sharable, there is no need to + ** worry about the missing sqlite3BtreeLeave() call here. */ + assert( pBtree->sharable==0 ); + sqlite3BtreeClose(pBtree); + }else{ + sqlite3BtreeLeave(pBtree); + } pCur->pBtree = 0; } return SQLITE_OK; @@ -72338,7 +72605,9 @@ static int balance_nonroot( } pgno = get4byte(pRight); while( 1 ){ - rc = getAndInitPage(pBt, pgno, &apOld[i], 0, 0); + if( rc==SQLITE_OK ){ + rc = getAndInitPage(pBt, pgno, &apOld[i], 0, 0); + } if( rc ){ memset(apOld, 0, (i+1)*sizeof(MemPage*)); goto balance_cleanup; @@ -72377,12 +72646,10 @@ static int balance_nonroot( if( pBt->btsFlags & BTS_FAST_SECURE ){ int iOff; + /* If the following if() condition is not true, the db is corrupted. + ** The call to dropCell() below will detect this. 
*/ iOff = SQLITE_PTR_TO_INT(apDiv[i]) - SQLITE_PTR_TO_INT(pParent->aData); - if( (iOff+szNew[i])>(int)pBt->usableSize ){ - rc = SQLITE_CORRUPT_BKPT; - memset(apOld, 0, (i+1)*sizeof(MemPage*)); - goto balance_cleanup; - }else{ + if( (iOff+szNew[i])<=(int)pBt->usableSize ){ memcpy(&aOvflSpace[iOff], apDiv[i], szNew[i]); apDiv[i] = &aOvflSpace[apDiv[i]-pParent->aData]; } @@ -72676,6 +72943,9 @@ static int balance_nonroot( apOld[i] = 0; rc = sqlite3PagerWrite(pNew->pDbPage); nNew++; + if( sqlite3PagerPageRefcount(pNew->pDbPage)!=1+(i==(iParentIdx-nxDiv)) ){ + rc = SQLITE_CORRUPT_BKPT; + } if( rc ) goto balance_cleanup; }else{ assert( i>0 ); @@ -72712,7 +72982,7 @@ static int balance_nonroot( aPgOrder[i] = aPgno[i] = apNew[i]->pgno; aPgFlags[i] = apNew[i]->pDbPage->flags; for(j=0; j<i; j++){ - if( aPgno[j]==aPgno[i] ){ + if( NEVER(aPgno[j]==aPgno[i]) ){ /* This branch is taken if the set of sibling pages somehow contains ** duplicate entries. This can happen if the database is corrupt. ** It would be simpler to detect this as part of the loop below, but @@ -73380,7 +73650,8 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( unsigned char *oldCell; unsigned char *newCell = 0; - assert( (flags & (BTREE_SAVEPOSITION|BTREE_APPEND))==flags ); + assert( (flags & (BTREE_SAVEPOSITION|BTREE_APPEND|BTREE_PREFORMAT))==flags ); + assert( (flags & BTREE_PREFORMAT)==0 || seekResult || pCur->pKeyInfo==0 ); if( pCur->eState==CURSOR_FAULT ){ assert( pCur->skipNext!=SQLITE_OK ); @@ -73398,7 +73669,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( ** keys with no associated data. If the cursor was opened expecting an ** intkey table, the caller should be inserting integer keys with a ** blob of associated data. */ - assert( (pX->pKey==0)==(pCur->pKeyInfo==0) ); + assert( (flags & BTREE_PREFORMAT) || (pX->pKey==0)==(pCur->pKeyInfo==0) ); /* Save the positions of any other cursors open on this table. ** @@ -73508,7 +73779,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( || CORRUPT_DB ); pPage = pCur->pPage; - assert( pPage->intKey || pX->nKey>=0 ); + assert( pPage->intKey || pX->nKey>=0 || (flags & BTREE_PREFORMAT) ); assert( pPage->leaf || !pPage->intKey ); if( pPage->nFree<0 ){ if( pCur->eState>CURSOR_INVALID ){ @@ -73525,7 +73796,21 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( assert( pPage->isInit ); newCell = pBt->pTmpSpace; assert( newCell!=0 ); - rc = fillInCell(pPage, newCell, pX, &szNew); + if( flags & BTREE_PREFORMAT ){ + rc = SQLITE_OK; + szNew = pBt->nPreformatSize; + if( szNew<4 ) szNew = 4; + if( ISAUTOVACUUM && szNew>pPage->maxLocal ){ + CellInfo info; + pPage->xParseCell(pPage, newCell, &info); + if( info.nPayload!=info.nLocal ){ + Pgno ovfl = get4byte(&newCell[szNew-4]); + ptrmapPut(pBt, ovfl, PTRMAP_OVERFLOW1, pPage->pgno, &rc); + } + } + }else{ + rc = fillInCell(pPage, newCell, pX, &szNew); + } if( rc ) goto end_insert; assert( szNew==pPage->xCellSize(pPage, newCell) ); assert( szNew <= MX_CELL_SIZE(pBt) ); @@ -73632,6 +73917,114 @@ end_insert: return rc; } +/* +** This function is used as part of copying the current row from cursor +** pSrc into cursor pDest. If the cursors are open on intkey tables, then +** parameter iKey is used as the rowid value when the record is copied +** into pDest. Otherwise, the record is copied verbatim. +** +** This function does not actually write the new value to cursor pDest. +** Instead, it creates and populates any required overflow pages and +** writes the data for the new cell into the BtShared.pTmpSpace buffer +** for the destination database. 
The size of the cell, in bytes, is left +** in BtShared.nPreformatSize. The caller completes the insertion by +** calling sqlite3BtreeInsert() with the BTREE_PREFORMAT flag specified. +** +** SQLITE_OK is returned if successful, or an SQLite error code otherwise. +*/ +SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64 iKey){ + int rc = SQLITE_OK; + BtShared *pBt = pDest->pBt; + u8 *aOut = pBt->pTmpSpace; /* Pointer to next output buffer */ + const u8 *aIn; /* Pointer to next input buffer */ + u32 nIn; /* Size of input buffer aIn[] */ + u32 nRem; /* Bytes of data still to copy */ + + getCellInfo(pSrc); + aOut += putVarint32(aOut, pSrc->info.nPayload); + if( pDest->pKeyInfo==0 ) aOut += putVarint(aOut, iKey); + nIn = pSrc->info.nLocal; + aIn = pSrc->info.pPayload; + if( aIn+nIn>pSrc->pPage->aDataEnd ){ + return SQLITE_CORRUPT_BKPT; + } + nRem = pSrc->info.nPayload; + if( nIn==nRem && nIn<pDest->pPage->maxLocal ){ + memcpy(aOut, aIn, nIn); + pBt->nPreformatSize = nIn + (aOut - pBt->pTmpSpace); + }else{ + Pager *pSrcPager = pSrc->pBt->pPager; + u8 *pPgnoOut = 0; + Pgno ovflIn = 0; + DbPage *pPageIn = 0; + MemPage *pPageOut = 0; + u32 nOut; /* Size of output buffer aOut[] */ + + nOut = btreePayloadToLocal(pDest->pPage, pSrc->info.nPayload); + pBt->nPreformatSize = nOut + (aOut - pBt->pTmpSpace); + if( nOut<pSrc->info.nPayload ){ + pPgnoOut = &aOut[nOut]; + pBt->nPreformatSize += 4; + } + + if( nRem>nIn ){ + if( aIn+nIn+4>pSrc->pPage->aDataEnd ){ + return SQLITE_CORRUPT_BKPT; + } + ovflIn = get4byte(&pSrc->info.pPayload[nIn]); + } + + do { + nRem -= nOut; + do{ + assert( nOut>0 ); + if( nIn>0 ){ + int nCopy = MIN(nOut, nIn); + memcpy(aOut, aIn, nCopy); + nOut -= nCopy; + nIn -= nCopy; + aOut += nCopy; + aIn += nCopy; + } + if( nOut>0 ){ + sqlite3PagerUnref(pPageIn); + pPageIn = 0; + rc = sqlite3PagerGet(pSrcPager, ovflIn, &pPageIn, PAGER_GET_READONLY); + if( rc==SQLITE_OK ){ + aIn = (const u8*)sqlite3PagerGetData(pPageIn); + ovflIn = get4byte(aIn); + aIn += 4; + nIn = pSrc->pBt->usableSize - 4; + } + } + }while( rc==SQLITE_OK && nOut>0 ); + + if( rc==SQLITE_OK && nRem>0 ){ + Pgno pgnoNew; + MemPage *pNew = 0; + rc = allocateBtreePage(pBt, &pNew, &pgnoNew, 0, 0); + put4byte(pPgnoOut, pgnoNew); + if( ISAUTOVACUUM && pPageOut ){ + ptrmapPut(pBt, pgnoNew, PTRMAP_OVERFLOW2, pPageOut->pgno, &rc); + } + releasePage(pPageOut); + pPageOut = pNew; + if( pPageOut ){ + pPgnoOut = pPageOut->aData; + put4byte(pPgnoOut, 0); + aOut = &pPgnoOut[4]; + nOut = MIN(pBt->usableSize - 4, nRem); + } + } + }while( nRem>0 && rc==SQLITE_OK ); + + releasePage(pPageOut); + sqlite3PagerUnref(pPageIn); + } + + return rc; +} + /* ** Delete the entry that the cursor is pointing to. 
** @@ -74229,7 +74622,7 @@ SQLITE_PRIVATE void sqlite3BtreeGetMeta(Btree *p, int idx, u32 *pMeta){ assert( idx>=0 && idx<=15 ); if( idx==BTREE_DATA_VERSION ){ - *pMeta = sqlite3PagerDataVersion(pBt->pPager) + p->iDataVersion; + *pMeta = sqlite3PagerDataVersion(pBt->pPager) + p->iBDataVersion; }else{ *pMeta = get4byte(&pBt->pPage1->aData[36 + idx*4]); } @@ -78020,7 +78413,7 @@ SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(Parse *pParse){ p->pNext = db->pVdbe; p->pPrev = 0; db->pVdbe = p; - p->magic = VDBE_MAGIC_INIT; + p->iVdbeMagic = VDBE_MAGIC_INIT; p->pParse = pParse; pParse->pVdbe = p; assert( pParse->aLabel==0 ); @@ -78221,7 +78614,7 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){ VdbeOp *pOp; i = p->nOp; - assert( p->magic==VDBE_MAGIC_INIT ); + assert( p->iVdbeMagic==VDBE_MAGIC_INIT ); assert( op>=0 && op<0xff ); if( p->nOpAlloc<=i ){ return growOp3(p, op, p1, p2, p3); @@ -78456,9 +78849,10 @@ SQLITE_PRIVATE void sqlite3VdbeExplainPop(Parse *pParse){ ** The zWhere string must have been obtained from sqlite3_malloc(). ** This routine will take ownership of the allocated memory. */ -SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe *p, int iDb, char *zWhere){ +SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe *p, int iDb, char *zWhere, u16 p5){ int j; sqlite3VdbeAddOp4(p, OP_ParseSchema, iDb, 0, 0, zWhere, P4_DYNAMIC); + sqlite3VdbeChangeP5(p, p5); for(j=0; j<p->db->nDb; j++) sqlite3VdbeUsesBtree(p, j); sqlite3MayAbort(p->pParse); } @@ -78550,7 +78944,7 @@ static SQLITE_NOINLINE void resizeResolveLabel(Parse *p, Vdbe *v, int j){ SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe *v, int x){ Parse *p = v->pParse; int j = ADDR(x); - assert( v->magic==VDBE_MAGIC_INIT ); + assert( v->iVdbeMagic==VDBE_MAGIC_INIT ); assert( j<-p->nLabel ); assert( j>=0 ); #ifdef SQLITE_DEBUG @@ -78875,7 +79269,7 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ ** Return the address of the next instruction to be inserted. */ SQLITE_PRIVATE int sqlite3VdbeCurrentAddr(Vdbe *p){ - assert( p->magic==VDBE_MAGIC_INIT ); + assert( p->iVdbeMagic==VDBE_MAGIC_INIT ); return p->nOp; } @@ -78960,7 +79354,7 @@ SQLITE_PRIVATE VdbeOp *sqlite3VdbeAddOpList( int i; VdbeOp *pOut, *pFirst; assert( nOp>0 ); - assert( p->magic==VDBE_MAGIC_INIT ); + assert( p->iVdbeMagic==VDBE_MAGIC_INIT ); if( p->nOp + nOp > p->nOpAlloc && growOpArray(p, nOp) ){ return 0; } @@ -79284,7 +79678,7 @@ SQLITE_PRIVATE void sqlite3VdbeChangeP4(Vdbe *p, int addr, const char *zP4, int sqlite3 *db; assert( p!=0 ); db = p->db; - assert( p->magic==VDBE_MAGIC_INIT ); + assert( p->iVdbeMagic==VDBE_MAGIC_INIT ); assert( p->aOp!=0 || db->mallocFailed ); if( db->mallocFailed ){ if( n!=P4_VTAB ) freeP4(db, n, (void*)*(char**)&zP4); @@ -79413,7 +79807,7 @@ SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe *p, int addr){ /* C89 specifies that the constant "dummy" will be initialized to all ** zeros, which is correct. MSVC generates a warning, nevertheless. 
*/ static VdbeOp dummy; /* Ignore the MSVC warning about no initializer */ - assert( p->magic==VDBE_MAGIC_INIT ); + assert( p->iVdbeMagic==VDBE_MAGIC_INIT ); if( addr<0 ){ addr = p->nOp - 1; } @@ -80098,7 +80492,7 @@ SQLITE_PRIVATE int sqlite3VdbeList( Op *pOp; /* Current opcode */ assert( p->explain ); - assert( p->magic==VDBE_MAGIC_RUN ); + assert( p->iVdbeMagic==VDBE_MAGIC_RUN ); assert( p->rc==SQLITE_OK || p->rc==SQLITE_BUSY || p->rc==SQLITE_NOMEM ); /* Even though this opcode does not use dynamic strings for @@ -80278,14 +80672,14 @@ SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe *p){ int i; #endif assert( p!=0 ); - assert( p->magic==VDBE_MAGIC_INIT || p->magic==VDBE_MAGIC_RESET ); + assert( p->iVdbeMagic==VDBE_MAGIC_INIT || p->iVdbeMagic==VDBE_MAGIC_RESET ); /* There should be at least one opcode. */ assert( p->nOp>0 ); /* Set the magic to VDBE_MAGIC_RUN sooner rather than later. */ - p->magic = VDBE_MAGIC_RUN; + p->iVdbeMagic = VDBE_MAGIC_RUN; #ifdef SQLITE_DEBUG for(i=0; i<p->nMem; i++){ @@ -80341,8 +80735,10 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady( assert( p!=0 ); assert( p->nOp>0 ); assert( pParse!=0 ); - assert( p->magic==VDBE_MAGIC_INIT ); + assert( p->iVdbeMagic==VDBE_MAGIC_INIT ); assert( pParse==p->pParse ); + p->pVList = pParse->pVList; + pParse->pVList = 0; db = p->db; assert( db->mallocFailed==0 ); nVar = pParse->nVar; @@ -80427,8 +80823,6 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady( } } - p->pVList = pParse->pVList; - pParse->pVList = 0; if( db->mallocFailed ){ p->nVar = 0; p->nCursor = 0; @@ -80456,20 +80850,15 @@ SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *p, VdbeCursor *pCx){ return; } assert( pCx->pBtx==0 || pCx->eCurType==CURTYPE_BTREE ); + assert( pCx->pBtx==0 || pCx->isEphemeral ); switch( pCx->eCurType ){ case CURTYPE_SORTER: { sqlite3VdbeSorterClose(p->db, pCx); break; } case CURTYPE_BTREE: { - if( pCx->isEphemeral ){ - if( pCx->pBtx ) sqlite3BtreeClose(pCx->pBtx); - /* The pCx->pCursor will be close automatically, if it exists, by - ** the call above. */ - }else{ - assert( pCx->uc.pCursor!=0 ); - sqlite3BtreeCloseCursor(pCx->uc.pCursor); - } + assert( pCx->uc.pCursor!=0 ); + sqlite3BtreeCloseCursor(pCx->uc.pCursor); break; } #ifndef SQLITE_OMIT_VIRTUALTABLE @@ -81026,7 +81415,7 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){ ** one, or the complete transaction if there is no statement transaction. 
*/ - if( p->magic!=VDBE_MAGIC_RUN ){ + if( p->iVdbeMagic!=VDBE_MAGIC_RUN ){ return SQLITE_OK; } if( db->mallocFailed ){ @@ -81184,7 +81573,7 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){ assert( db->nVdbeRead>=db->nVdbeWrite ); assert( db->nVdbeWrite>=0 ); } - p->magic = VDBE_MAGIC_HALT; + p->iVdbeMagic = VDBE_MAGIC_HALT; checkActiveVdbeCnt(db); if( db->mallocFailed ){ p->rc = SQLITE_NOMEM_BKPT; @@ -81357,7 +81746,7 @@ SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){ } } #endif - p->magic = VDBE_MAGIC_RESET; + p->iVdbeMagic = VDBE_MAGIC_RESET; return p->rc & db->errMask; } @@ -81367,7 +81756,7 @@ SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){ */ SQLITE_PRIVATE int sqlite3VdbeFinalize(Vdbe *p){ int rc = SQLITE_OK; - if( p->magic==VDBE_MAGIC_RUN || p->magic==VDBE_MAGIC_HALT ){ + if( p->iVdbeMagic==VDBE_MAGIC_RUN || p->iVdbeMagic==VDBE_MAGIC_HALT ){ rc = sqlite3VdbeReset(p); assert( (rc & p->db->errMask)==rc ); } @@ -81428,7 +81817,7 @@ SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3 *db, Vdbe *p){ vdbeFreeOpArray(db, pSub->aOp, pSub->nOp); sqlite3DbFree(db, pSub); } - if( p->magic!=VDBE_MAGIC_INIT ){ + if( p->iVdbeMagic!=VDBE_MAGIC_INIT ){ releaseMemArray(p->aVar, p->nVar); sqlite3DbFree(db, p->pVList); sqlite3DbFree(db, p->pFree); @@ -81476,7 +81865,7 @@ SQLITE_PRIVATE void sqlite3VdbeDelete(Vdbe *p){ if( p->pNext ){ p->pNext->pPrev = p->pPrev; } - p->magic = VDBE_MAGIC_DEAD; + p->iVdbeMagic = VDBE_MAGIC_DEAD; p->db = 0; sqlite3DbFreeNN(db, p); } @@ -81553,6 +81942,7 @@ SQLITE_PRIVATE int sqlite3VdbeCursorMoveto(VdbeCursor **pp, u32 *piCol){ assert( p->eCurType==CURTYPE_BTREE || p->eCurType==CURTYPE_PSEUDO ); if( p->deferredMoveto ){ u32 iMap; + assert( !p->isEphemeral ); if( p->aAltMap && (iMap = p->aAltMap[1+*piCol])>0 && !p->nullRow ){ *pp = p->pAltCursor; *piCol = iMap - 1; @@ -83855,7 +84245,7 @@ static int sqlite3Step(Vdbe *p){ int rc; assert(p); - if( p->magic!=VDBE_MAGIC_RUN ){ + if( p->iVdbeMagic!=VDBE_MAGIC_RUN ){ /* We used to require that sqlite3_reset() be called before retrying ** sqlite3_step() after any error or after SQLITE_DONE. But beginning ** with version 3.7.0, we changed this so that sqlite3_reset() would @@ -84571,7 +84961,7 @@ static int vdbeUnbind(Vdbe *p, int i){ return SQLITE_MISUSE_BKPT; } sqlite3_mutex_enter(p->db->mutex); - if( p->magic!=VDBE_MAGIC_RUN || p->pc>=0 ){ + if( p->iVdbeMagic!=VDBE_MAGIC_RUN || p->pc>=0 ){ sqlite3Error(p->db, SQLITE_MISUSE); sqlite3_mutex_leave(p->db->mutex); sqlite3_log(SQLITE_MISUSE, @@ -84925,7 +85315,7 @@ SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt){ */ SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt *pStmt){ Vdbe *v = (Vdbe*)pStmt; - return v!=0 && v->magic==VDBE_MAGIC_RUN && v->pc>=0; + return v!=0 && v->iVdbeMagic==VDBE_MAGIC_RUN && v->pc>=0; } /* @@ -85417,7 +85807,7 @@ SQLITE_PRIVATE char *sqlite3VdbeExpandSql( assert( idx>0 ); } zRawSql += nToken; - nextIndex = idx + 1; + nextIndex = MAX(idx + 1, nextIndex); assert( idx>0 && idx<=p->nVar ); pVar = &p->aVar[idx-1]; if( pVar->flags & MEM_Null ){ @@ -85761,11 +86151,6 @@ static VdbeCursor *allocateCursor( assert( iCur>=0 && iCur<p->nCursor ); if( p->apCsr[iCur] ){ /*OPTIMIZATION-IF-FALSE*/ - /* Before calling sqlite3VdbeFreeCursor(), ensure the isEphemeral flag - ** is clear. Otherwise, if this is an ephemeral cursor created by - ** OP_OpenDup, the cursor will not be closed and will still be part - ** of a BtShared.pCursor list. 
*/ - if( p->apCsr[iCur]->pBtx==0 ) p->apCsr[iCur]->isEphemeral = 0; sqlite3VdbeFreeCursor(p, p->apCsr[iCur]); p->apCsr[iCur] = 0; } @@ -86263,7 +86648,7 @@ SQLITE_PRIVATE int sqlite3VdbeExec( #endif /*** INSERT STACK UNION HERE ***/ - assert( p->magic==VDBE_MAGIC_RUN );  /* sqlite3_step() verifies this */ + assert( p->iVdbeMagic==VDBE_MAGIC_RUN );  /* sqlite3_step() verifies this */ sqlite3VdbeEnter(p); #ifndef SQLITE_OMIT_PROGRESS_CALLBACK if( db->xProgress ){ @@ -87023,6 +87408,26 @@ case OP_IntCopy: {            /* out2 */ break; } +/* Opcode: ChngCntRow P1 P2 * * * +** Synopsis: output=r[P1] +** +** Output value in register P1 as the change count for a DML statement, +** due to the "PRAGMA count_changes=ON" setting.  Or, if there was a +** foreign key error in the statement, trigger the error now. +** +** This opcode is a variant of OP_ResultRow that checks the foreign key +** immediate constraint count and throws an error if the count is +** non-zero.  The P2 operand must be 1. +*/ +case OP_ChngCntRow: { +  assert( pOp->p2==1 ); +  if( (rc = sqlite3VdbeCheckFk(p,0))!=SQLITE_OK ){ +    goto abort_due_to_error; +  } +  /* Fall through to the next case, OP_ResultRow */ +  /* no break */ deliberate_fall_through +} + /* Opcode: ResultRow P1 P2 * * * ** Synopsis: output=r[P1@P2] ** @@ -87039,34 +87444,6 @@ case OP_ResultRow: { assert( pOp->p1>0 ); assert( pOp->p1+pOp->p2<=(p->nMem+1 - p->nCursor)+1 ); -  /* If this statement has violated immediate foreign key constraints, do -  ** not return the number of rows modified. And do not RELEASE the statement -  ** transaction. It needs to be rolled back.  */ -  if( SQLITE_OK!=(rc = sqlite3VdbeCheckFk(p, 0)) ){ -    assert( db->flags&SQLITE_CountRows ); -    assert( p->usesStmtJournal ); -    goto abort_due_to_error; -  } - -  /* If the SQLITE_CountRows flag is set in sqlite3.flags mask, then -  ** DML statements invoke this opcode to return the number of rows -  ** modified to the user. This is the only way that a VM that -  ** opens a statement transaction may invoke this opcode. -  ** -  ** In case this is such a statement, close any statement transaction -  ** opened by this VM before returning control to the user. This is to -  ** ensure that statement-transactions are always nested, not overlapping. -  ** If the open statement-transaction is not closed here, then the user -  ** may step another VM that opens its own statement transaction. This -  ** may lead to overlapping statement transactions. -  ** -  ** The statement transaction is never a top-level transaction.  Hence -  ** the RELEASE call below can never fail.
- */ - assert( p->iStatement==0 || db->flags&SQLITE_CountRows ); - rc = sqlite3VdbeCloseStatement(p, SAVEPOINT_RELEASE); - assert( rc==SQLITE_OK ); - /* Invalidate all ephemeral cursor row caches */ p->cacheCtr = (p->cacheCtr + 2)|1; @@ -89459,7 +89836,7 @@ case OP_OpenDup: { pOrig = p->apCsr[pOp->p2]; assert( pOrig ); -  assert( pOrig->pBtx!=0 );  /* Only ephemeral cursors can be duplicated */ +  assert( pOrig->isEphemeral );  /* Only ephemeral cursors can be duplicated */ pCx = allocateCursor(p, pOp->p1, pOrig->nField, -1, CURTYPE_BTREE); if( pCx==0 ) goto no_mem; @@ -89469,7 +89846,10 @@ case OP_OpenDup: { pCx->isTable = pOrig->isTable; pCx->pgnoRoot = pOrig->pgnoRoot; pCx->isOrdered = pOrig->isOrdered; -  rc = sqlite3BtreeCursor(pOrig->pBtx, pCx->pgnoRoot, BTREE_WRCSR, +  pCx->pBtx = pOrig->pBtx; +  pCx->hasBeenDuped = 1; +  pOrig->hasBeenDuped = 1; +  rc = sqlite3BtreeCursor(pCx->pBtx, pCx->pgnoRoot, BTREE_WRCSR, pCx->pKeyInfo, pCx->uc.pCursor); /* The sqlite3BtreeCursor() routine can only fail for the first cursor ** opened for a database.  Since there is already an open cursor when this @@ -89535,9 +89915,10 @@ case OP_OpenEphemeral: { aMem[pOp->p3].z = ""; } pCx = p->apCsr[pOp->p1]; -  if( pCx && pCx->pBtx ){ -    /* If the ephermeral table is already open, erase all existing content -    ** so that the table is empty again, rather than creating a new table. */ +  if( pCx && !pCx->hasBeenDuped ){ +    /* If the ephemeral table is already open and has no duplicates from +    ** OP_OpenDup, then erase all existing content so that the table is +    ** empty again, rather than creating a new table.  */ assert( pCx->isEphemeral ); pCx->seqCount = 0; pCx->cacheStatus = CACHE_STALE; @@ -89551,33 +89932,36 @@ case OP_OpenEphemeral: { vfsFlags); if( rc==SQLITE_OK ){ rc = sqlite3BtreeBeginTrans(pCx->pBtx, 1, 0); -    } -    if( rc==SQLITE_OK ){ -      /* If a transient index is required, create it by calling -      ** sqlite3BtreeCreateTable() with the BTREE_BLOBKEY flag before -      ** opening it. If a transient table is required, just use the -      ** automatically created table with root-page 1 (an BLOB_INTKEY table). -      */ -      if( (pCx->pKeyInfo = pKeyInfo = pOp->p4.pKeyInfo)!=0 ){ -        assert( pOp->p4type==P4_KEYINFO ); -        rc = sqlite3BtreeCreateTable(pCx->pBtx, &pCx->pgnoRoot, -                                     BTREE_BLOBKEY | pOp->p5); -        if( rc==SQLITE_OK ){ -          assert( pCx->pgnoRoot==SCHEMA_ROOT+1 ); -          assert( pKeyInfo->db==db ); -          assert( pKeyInfo->enc==ENC(db) ); -          rc = sqlite3BtreeCursor(pCx->pBtx, pCx->pgnoRoot, BTREE_WRCSR, -                                  pKeyInfo, pCx->uc.pCursor); +      if( rc==SQLITE_OK ){ +        /* If a transient index is required, create it by calling +        ** sqlite3BtreeCreateTable() with the BTREE_BLOBKEY flag before +        ** opening it. If a transient table is required, just use the +        ** automatically created table with root-page 1 (a BLOB_INTKEY table).
+ */ + if( (pCx->pKeyInfo = pKeyInfo = pOp->p4.pKeyInfo)!=0 ){ +          assert( pOp->p4type==P4_KEYINFO ); +          rc = sqlite3BtreeCreateTable(pCx->pBtx, &pCx->pgnoRoot, +                                       BTREE_BLOBKEY | pOp->p5); +          if( rc==SQLITE_OK ){ +            assert( pCx->pgnoRoot==SCHEMA_ROOT+1 ); +            assert( pKeyInfo->db==db ); +            assert( pKeyInfo->enc==ENC(db) ); +            rc = sqlite3BtreeCursor(pCx->pBtx, pCx->pgnoRoot, BTREE_WRCSR, +                                    pKeyInfo, pCx->uc.pCursor); +          } +          pCx->isTable = 0; +        }else{ +          pCx->pgnoRoot = SCHEMA_ROOT; +          rc = sqlite3BtreeCursor(pCx->pBtx, SCHEMA_ROOT, BTREE_WRCSR, +                                  0, pCx->uc.pCursor); +          pCx->isTable = 1; } -      pCx->isTable = 0; -    }else{ -      pCx->pgnoRoot = SCHEMA_ROOT; -      rc = sqlite3BtreeCursor(pCx->pBtx, SCHEMA_ROOT, BTREE_WRCSR, -                              0, pCx->uc.pCursor); -      pCx->isTable = 1; +      } +      pCx->isOrdered = (pOp->p5!=BTREE_UNORDERED); +      if( rc ){ +        sqlite3BtreeClose(pCx->pBtx); } } -    pCx->isOrdered = (pOp->p5!=BTREE_UNORDERED); } if( rc ) goto abort_due_to_error; pCx->nullRow = 1; @@ -90011,13 +90395,13 @@ seek_not_found: ** ** There are three possible outcomes from this opcode:<ol> ** -** <li> If after This.P1 steps, the cursor is still point to a place that -**      is earlier in the btree than the target row, -**      then fall through into the subsquence OP_SeekGE opcode. +** <li> If after This.P1 steps, the cursor is still pointing to a place that +**      is earlier in the btree than the target row, then fall through +**      into the subsequent OP_SeekGE opcode. ** ** <li> If the cursor is successfully moved to the target row by 0 or more ** sqlite3BtreeNext() calls, then jump to This.P2, which will land just -** past the OP_IdxGT opcode that follows the OP_SeekGE. +** past the OP_IdxGT or OP_IdxGE opcode that follows the OP_SeekGE. ** ** <li> If the cursor ends up past the target row (indicating that the target ** row does not exist in the btree) then jump to SeekOP.P2. @@ -90034,7 +90418,8 @@ case OP_SeekScan: { /* pOp->p2 points to the first instruction past the OP_IdxGT that ** follows the OP_SeekGE.  */ assert( pOp->p2>=(int)(pOp-aOp)+2 ); -  assert( aOp[pOp->p2-1].opcode==OP_IdxGT ); +  assert( aOp[pOp->p2-1].opcode==OP_IdxGT || aOp[pOp->p2-1].opcode==OP_IdxGE ); +  testcase( aOp[pOp->p2-1].opcode==OP_IdxGE ); assert( pOp[1].p1==aOp[pOp->p2-1].p1 ); assert( pOp[1].p2==aOp[pOp->p2-1].p2 ); assert( pOp[1].p3==aOp[pOp->p2-1].p3 ); @@ -90487,8 +90872,10 @@ case OP_NewRowid: {           /* out2 */ VdbeCursor *pC;    /* Cursor of table to get the new rowid */ int res;           /* Result of an sqlite3BtreeLast() */ int cnt;           /* Counter to limit the number of searches */ +#ifndef SQLITE_OMIT_AUTOINCREMENT Mem *pMem;         /* Register holding largest rowid for AUTOINCREMENT */ VdbeFrame *pFrame; /* Root frame of VDBE */ +#endif v = 0; res = 0; @@ -90704,7 +91091,8 @@ case OP_Insert: { } x.pKey = 0; rc = sqlite3BtreeInsert(pC->uc.pCursor, &x, -      (pOp->p5 & (OPFLAG_APPEND|OPFLAG_SAVEPOSITION)), seekResult +      (pOp->p5 & (OPFLAG_APPEND|OPFLAG_SAVEPOSITION|OPFLAG_PREFORMAT)), +      seekResult ); pC->deferredMoveto = 0; pC->cacheStatus = CACHE_STALE; @@ -90721,6 +91109,33 @@ case OP_Insert: { break; } +/* Opcode: RowCell P1 P2 P3 * * +** +** P1 and P2 are both open cursors. Both must be opened on the same type +** of table - intkey or index.  This opcode is used as part of copying +** the current row from P2 into P1. If the cursors are opened on intkey +** tables, register P3 contains the rowid to use with the new record in +** P1. If they are opened on index tables, P3 is not used.
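+**
+** (Editor's sketch of a plausible generated pairing, not verbatim VDBE
+** output, with cursor and register numbers chosen for illustration:
+**
+**     RowCell  1 2 3    -- preformat current row of cursor 2 for cursor 1
+**     Insert   1 4 3    -- p5 includes OPFLAG_PREFORMAT; writes the cell
+**                       -- left in BtShared.pTmpSpace by RowCell
+**
+** where register 3 holds the rowid for the intkey case.)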
+** +** This opcode must be followed by either an Insert or IdxInsert opcode +** with the OPFLAG_PREFORMAT flag set to complete the insert operation. +*/ +case OP_RowCell: { +  VdbeCursor *pDest;        /* Cursor to write to */ +  VdbeCursor *pSrc;         /* Cursor to read from */ +  i64 iKey;                 /* Rowid value to insert with */ +  assert( pOp[1].opcode==OP_Insert || pOp[1].opcode==OP_IdxInsert ); +  assert( pOp[1].opcode==OP_Insert    || pOp->p3==0 ); +  assert( pOp[1].opcode==OP_IdxInsert || pOp->p3>0 ); +  assert( pOp[1].p5 & OPFLAG_PREFORMAT ); +  pDest = p->apCsr[pOp->p1]; +  pSrc = p->apCsr[pOp->p2]; +  iKey = pOp->p3 ? aMem[pOp->p3].u.i : 0; +  rc = sqlite3BtreeTransferRow(pDest->uc.pCursor, pSrc->uc.pCursor, iKey); +  if( rc!=SQLITE_OK ) goto abort_due_to_error; +  break; +}; + /* Opcode: Delete P1 P2 P3 P4 P5 ** ** Delete the record at which the P1 cursor is currently pointing. @@ -91376,7 +91791,7 @@ case OP_IdxInsert: {        /* in2 */ assert( pC!=0 ); assert( !isSorter(pC) ); pIn2 = &aMem[pOp->p2]; -  assert( pIn2->flags & MEM_Blob ); +  assert( (pIn2->flags & MEM_Blob) || (pOp->p5 & OPFLAG_PREFORMAT) ); if( pOp->p5 & OPFLAG_NCHANGE ) p->nChange++; assert( pC->eCurType==CURTYPE_BTREE ); assert( pC->isTable==0 ); @@ -91387,7 +91802,7 @@ case OP_IdxInsert: {        /* in2 */ x.aMem = aMem + pOp->p3; x.nMem = (u16)pOp->p4.i; rc = sqlite3BtreeInsert(pC->uc.pCursor, &x, -       (pOp->p5 & (OPFLAG_APPEND|OPFLAG_SAVEPOSITION)), +       (pOp->p5 & (OPFLAG_APPEND|OPFLAG_SAVEPOSITION|OPFLAG_PREFORMAT)), ((pOp->p5 & OPFLAG_USESEEKRESULT) ? pC->seekResult : 0) ); assert( pC->deferredMoveto==0 ); @@ -91460,7 +91875,7 @@ case OP_IdxDelete: { rc = sqlite3BtreeDelete(pCrsr, BTREE_AUXDELETE); if( rc ) goto abort_due_to_error; }else if( pOp->p5 ){ -    rc = SQLITE_CORRUPT_INDEX; +    rc = sqlite3ReportError(SQLITE_CORRUPT_INDEX, __LINE__, "index corruption"); goto abort_due_to_error; } assert( pC->deferredMoveto==0 ); @@ -91539,6 +91954,8 @@ case OP_IdxRowid: {           /* out2 */ pTabCur->deferredMoveto = 1; assert( pOp->p4type==P4_INTARRAY || pOp->p4.ai==0 ); pTabCur->aAltMap = pOp->p4.ai; +    assert( !pC->isEphemeral ); +    assert( !pTabCur->isEphemeral ); pTabCur->pAltCursor = pC; }else{ pOut = out2Prerelease(p, pOp); @@ -91886,7 +92303,7 @@ case OP_ParseSchema: { if( pOp->p4.z==0 ){ sqlite3SchemaClear(db->aDb[iDb].pSchema); db->mDbFlags &= ~DBFLAG_SchemaKnownOk; -    rc = sqlite3InitOne(db, iDb, &p->zErrMsg, INITFLAG_AlterTable); +    rc = sqlite3InitOne(db, iDb, &p->zErrMsg, pOp->p5); db->mDbFlags |= DBFLAG_SchemaChange; p->expired = 0; }else @@ -97594,7 +98011,6 @@ struct MemJournal { int nChunkSize;                 /* In-memory chunk-size */ int nSpill;                     /* Bytes of data before flushing */ -  int nSize;                      /* Bytes of data currently in memory */ FileChunk *pFirst;              /* Head of in-memory chunk-list */ FilePoint endpoint;             /* Pointer to the end of the file */ FilePoint readpoint;            /* Pointer to the end of the last xRead() */ @@ -97655,14 +98071,13 @@ static int memjrnlRead( /* ** Free the list of FileChunk structures headed at MemJournal.pFirst. */ -static void memjrnlFreeChunks(MemJournal *p){ +static void memjrnlFreeChunks(FileChunk *pFirst){ FileChunk *pIter; FileChunk *pNext; -  for(pIter=p->pFirst; pIter; pIter=pNext){ +  for(pIter=pFirst; pIter; pIter=pNext){ pNext = pIter->pNext; sqlite3_free(pIter); } -  p->pFirst = 0; } /* @@ -97689,7 +98104,7 @@ static int memjrnlCreateFile(MemJournal *p){ } if( rc==SQLITE_OK ){ /* No error has occurred. Free the in-memory buffers.
*/ -    memjrnlFreeChunks(&copy); +    memjrnlFreeChunks(copy.pFirst); } } if( rc!=SQLITE_OK ){ @@ -97772,7 +98187,6 @@ static int memjrnlWrite( nWrite -= iSpace; p->endpoint.iOffset += iSpace; } -    p->nSize = iAmt + iOfst; } } @@ -97780,22 +98194,30 @@ } /* -** Truncate the file. -** -** If the journal file is already on disk, truncate it there. Or, if it -** is still in main memory but is being truncated to zero bytes in size, -** ignore +** Truncate the in-memory file. */ static int memjrnlTruncate(sqlite3_file *pJfd, sqlite_int64 size){ MemJournal *p = (MemJournal *)pJfd; -  if( ALWAYS(size==0) ){ -    memjrnlFreeChunks(p); -    p->nSize = 0; -    p->endpoint.pChunk = 0; -    p->endpoint.iOffset = 0; -    p->readpoint.pChunk = 0; -    p->readpoint.iOffset = 0; +  FileChunk *pIter = 0; + +  if( size==0 ){ +    memjrnlFreeChunks(p->pFirst); +    p->pFirst = 0; +  }else{ +    i64 iOff = p->nChunkSize; +    for(pIter=p->pFirst; ALWAYS(pIter) && iOff<=size; pIter=pIter->pNext){ +      iOff += p->nChunkSize; +    } +    if( ALWAYS(pIter) ){ +      memjrnlFreeChunks(pIter->pNext); +      pIter->pNext = 0; +    } } + +  p->endpoint.pChunk = pIter; +  p->endpoint.iOffset = size; +  p->readpoint.pChunk = 0; +  p->readpoint.iOffset = 0; return SQLITE_OK; } @@ -97804,7 +98226,7 @@ static int memjrnlTruncate(sqlite3_file *pJfd, sqlite_int64 size){ */ static int memjrnlClose(sqlite3_file *pJfd){ MemJournal *p = (MemJournal *)pJfd; -  memjrnlFreeChunks(p); +  memjrnlFreeChunks(p->pFirst); return SQLITE_OK; } @@ -97978,7 +98400,7 @@ SQLITE_PRIVATE int sqlite3JournalSize(sqlite3_vfs *pVfs){ ** Walk all expressions linked into the list of Window objects passed ** as the second argument. */ -static int walkWindowList(Walker *pWalker, Window *pList){ +static int walkWindowList(Walker *pWalker, Window *pList, int bOneOnly){ Window *pWin; for(pWin=pList; pWin; pWin=pWin->pNextWin){ int rc; @@ -97997,6 +98419,7 @@ static int walkWindowList(Walker *pWalker, Window *pList){ if( NEVER(rc) ) return WRC_Abort; rc = sqlite3WalkExpr(pWalker, pWin->pEnd); if( NEVER(rc) ) return WRC_Abort; +    if( bOneOnly ) break; } return WRC_Continue; } @@ -98044,7 +98467,7 @@ static SQLITE_NOINLINE int walkExpr(Walker *pWalker, Expr *pExpr){ } #ifndef SQLITE_OMIT_WINDOWFUNC if( ExprHasProperty(pExpr, EP_WinFunc) ){ -        if( walkWindowList(pWalker, pExpr->y.pWin) ) return WRC_Abort; +        if( walkWindowList(pWalker, pExpr->y.pWin, 1) ) return WRC_Abort; } #endif } @@ -98091,7 +98514,7 @@ SQLITE_PRIVATE int sqlite3WalkSelectExpr(Walker *pWalker, Select *p){ if( pParse && IN_RENAME_OBJECT ){ /* The following may return WRC_Abort if there are unresolvable ** symbols (e.g. a table that does not exist) in a window definition. */ -    int rc = walkWindowList(pWalker, p->pWinDefn); +    int rc = walkWindowList(pWalker, p->pWinDefn, 0); return rc; } } @@ -98109,7 +98532,7 @@ SQLITE_PRIVATE int sqlite3WalkSelectExpr(Walker *pWalker, Select *p){ SQLITE_PRIVATE int sqlite3WalkSelectFrom(Walker *pWalker, Select *p){ SrcList *pSrc; int i; -  struct SrcList_item *pItem; +  SrcItem *pItem; pSrc = p->pSrc; if( pSrc ){ @@ -98275,7 +98698,6 @@ static void resolveAlias( ExprList *pEList,      /* A result set */ int iCol,              /* A column in the result set.
0..pEList->nExpr-1 */ Expr *pExpr, /* Transform this into an alias to the result set */ - const char *zType, /* "GROUP" or "ORDER" or "" */ int nSubquery /* Number of subqueries that the label is moving */ ){ Expr *pOrig; /* The iCol-th column of the result set */ @@ -98288,7 +98710,7 @@ static void resolveAlias( db = pParse->db; pDup = sqlite3ExprDup(db, pOrig, 0); if( pDup!=0 ){ - if( zType[0]!='G' ) incrAggFunctionDepth(pDup, nSubquery); + incrAggFunctionDepth(pDup, nSubquery); if( pExpr->op==TK_COLLATE ){ pDup = sqlite3ExprAddCollateString(pParse, pDup, pExpr->u.zToken); } @@ -98317,7 +98739,6 @@ static void resolveAlias( } sqlite3DbFree(db, pDup); } - ExprSetProperty(pExpr, EP_Alias); } @@ -98452,8 +98873,8 @@ static int lookupName( int cntTab = 0; /* Number of matching table names */ int nSubquery = 0; /* How many levels of subquery */ sqlite3 *db = pParse->db; /* The database connection */ - struct SrcList_item *pItem; /* Use for looping over pSrcList items */ - struct SrcList_item *pMatch = 0; /* The matching pSrcList item */ + SrcItem *pItem; /* Use for looping over pSrcList items */ + SrcItem *pMatch = 0; /* The matching pSrcList item */ NameContext *pTopNC = pNC; /* First namecontext in the list */ Schema *pSchema = 0; /* Schema of the expression */ int eNewExprOp = TK_COLUMN; /* New value for pExpr->op on success */ @@ -98574,25 +98995,33 @@ static int lookupName( #if !defined(SQLITE_OMIT_TRIGGER) || !defined(SQLITE_OMIT_UPSERT) /* If we have not already resolved the name, then maybe ** it is a new.* or old.* trigger argument reference. Or - ** maybe it is an excluded.* from an upsert. + ** maybe it is an excluded.* from an upsert. Or maybe it is + ** a reference in the RETURNING clause to a table being modified. */ - if( zDb==0 && zTab!=0 && cntTab==0 ){ + if( cnt==0 && zDb==0 ){ pTab = 0; #ifndef SQLITE_OMIT_TRIGGER if( pParse->pTriggerTab!=0 ){ int op = pParse->eTriggerOp; assert( op==TK_DELETE || op==TK_UPDATE || op==TK_INSERT ); - if( op!=TK_DELETE && sqlite3StrICmp("new",zTab) == 0 ){ + if( pParse->bReturning ){ + if( (pNC->ncFlags & NC_UBaseReg)!=0 + && (zTab==0 || sqlite3StrICmp(zTab,pParse->pTriggerTab->zName)==0) + ){ + pExpr->iTable = op!=TK_DELETE; + pTab = pParse->pTriggerTab; + } + }else if( op!=TK_DELETE && zTab && sqlite3StrICmp("new",zTab) == 0 ){ pExpr->iTable = 1; pTab = pParse->pTriggerTab; - }else if( op!=TK_INSERT && sqlite3StrICmp("old",zTab)==0 ){ + }else if( op!=TK_INSERT && zTab && sqlite3StrICmp("old",zTab)==0 ){ pExpr->iTable = 0; pTab = pParse->pTriggerTab; } } #endif /* SQLITE_OMIT_TRIGGER */ #ifndef SQLITE_OMIT_UPSERT - if( (pNC->ncFlags & NC_UUpsert)!=0 ){ + if( (pNC->ncFlags & NC_UUpsert)!=0 && zTab!=0 ){ Upsert *pUpsert = pNC->uNC.pUpsert; if( pUpsert && sqlite3StrICmp("excluded",zTab)==0 ){ pTab = pUpsert->pUpsertSrc->a[0].pTab; @@ -98620,6 +99049,7 @@ static int lookupName( } if( iCol<pTab->nCol ){ cnt++; + pMatch = 0; #ifndef SQLITE_OMIT_UPSERT if( pExpr->iTable==EXCLUDED_TABLE_NUMBER ){ testcase( iCol==(-1) ); @@ -98631,27 +99061,32 @@ static int lookupName( pExpr->iTable = pNC->uNC.pUpsert->regData + sqlite3TableColumnToStorage(pTab, iCol); eNewExprOp = TK_REGISTER; - ExprSetProperty(pExpr, EP_Alias); } }else #endif /* SQLITE_OMIT_UPSERT */ { -#ifndef SQLITE_OMIT_TRIGGER - if( iCol<0 ){ - pExpr->affExpr = SQLITE_AFF_INTEGER; - }else if( pExpr->iTable==0 ){ - testcase( iCol==31 ); - testcase( iCol==32 ); - pParse->oldmask |= (iCol>=32 ? 
0xffffffff : (((u32)1)<<iCol)); -          }else{ -            testcase( iCol==31 ); -            testcase( iCol==32 ); -            pParse->newmask |= (iCol>=32 ? 0xffffffff : (((u32)1)<<iCol)); -          } pExpr->y.pTab = pTab; -          pExpr->iColumn = (i16)iCol; -          eNewExprOp = TK_TRIGGER; +          if( pParse->bReturning ){ +            eNewExprOp = TK_REGISTER; +            pExpr->iTable = pNC->uNC.iBaseReg + (pTab->nCol+1)*pExpr->iTable + +               sqlite3TableColumnToStorage(pTab, iCol) + 1; +          }else{ +            pExpr->iColumn = (i16)iCol; +            eNewExprOp = TK_TRIGGER; +#ifndef SQLITE_OMIT_TRIGGER +            if( iCol<0 ){ +              pExpr->affExpr = SQLITE_AFF_INTEGER; +            }else if( pExpr->iTable==0 ){ +              testcase( iCol==31 ); +              testcase( iCol==32 ); +              pParse->oldmask |= (iCol>=32 ? 0xffffffff : (((u32)1)<<iCol)); +            }else{ +              testcase( iCol==31 ); +              testcase( iCol==32 ); +              pParse->newmask |= (iCol>=32 ? 0xffffffff : (((u32)1)<<iCol)); +            } #endif /* SQLITE_OMIT_TRIGGER */ +          } } } } @@ -98721,7 +99156,7 @@ static int lookupName( sqlite3ErrorMsg(pParse, "row value misused"); return WRC_Abort; } -        resolveAlias(pParse, pEList, j, pExpr, "", nSubquery); +        resolveAlias(pParse, pEList, j, pExpr, nSubquery); cnt = 1; pMatch = 0; assert( zTab==0 && zDb==0 ); @@ -98756,6 +99191,7 @@ assert( pExpr->op==TK_ID ); if( ExprHasProperty(pExpr,EP_DblQuoted) && areDoubleQuotedStringsEnabled(db, pTopNC) +     && (db->init.bDropColumn==0 || sqlite3StrICmp(zCol, db->init.azInit[0])!=0) ){ /* If a double-quoted identifier does not match any known column name, ** then treat it as a string. @@ -98770,6 +99206,11 @@ ** Someday, I hope to get rid of this hack. Unfortunately there is ** a huge amount of legacy SQL that uses it. So for now, we just ** issue a warning. +      ** +      ** 2021-03-15: ticket 1c24a659e6d7f3a1 +      ** Do not do the ID-to-STRING conversion when doing the schema +      ** sanity check following a DROP COLUMN if the identifier name matches +      ** the name of the column being dropped. */ sqlite3_log(SQLITE_WARNING, "double-quoted string literal: \"%w\"", zCol); @@ -98823,18 +99264,24 @@ /* Clean up and return */ -  sqlite3ExprDelete(db, pExpr->pLeft); -  pExpr->pLeft = 0; -  sqlite3ExprDelete(db, pExpr->pRight); -  pExpr->pRight = 0; +  if( !ExprHasProperty(pExpr,(EP_TokenOnly|EP_Leaf)) ){ +    sqlite3ExprDelete(db, pExpr->pLeft); +    pExpr->pLeft = 0; +    sqlite3ExprDelete(db, pExpr->pRight); +    pExpr->pRight = 0; +  } pExpr->op = eNewExprOp; ExprSetProperty(pExpr, EP_Leaf); lookupname_end: if( cnt==1 ){ assert( pNC!=0 ); -    if( !ExprHasProperty(pExpr, EP_Alias) ){ +#ifndef SQLITE_OMIT_AUTHORIZATION +    if( pParse->db->xAuth +     && (pExpr->op==TK_COLUMN || pExpr->op==TK_TRIGGER) +    ){ sqlite3AuthRead(pParse, pExpr, pSchema, pNC->pSrcList); } +#endif /* Increment the nRef value on all name contexts from TopNC up to ** the point where the name matched.
*/ for(;;){ @@ -98856,7 +99303,7 @@ lookupname_end: SQLITE_PRIVATE Expr *sqlite3CreateColumnExpr(sqlite3 *db, SrcList *pSrc, int iSrc, int iCol){ Expr *p = sqlite3ExprAlloc(db, TK_COLUMN, 0, 0); if( p ){ -    struct SrcList_item *pItem = &pSrc->a[iSrc]; +    SrcItem *pItem = &pSrc->a[iSrc]; Table *pTab = p->y.pTab = pItem->pTab; p->iTable = pItem->iCursor; if( p->y.pTab->iPKey==iCol ){ @@ -98968,7 +99415,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ */ case TK_ROW: { SrcList *pSrcList = pNC->pSrcList; -      struct SrcList_item *pItem; +      SrcItem *pItem; assert( pSrcList && pSrcList->nSrc>=1 ); pItem = pSrcList->a; pExpr->op = TK_COLUMN; @@ -98979,6 +99426,47 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ break; } +    /* An optimization:  Attempt to convert +    ** +    **      "expr IS NOT NULL"  -->  "TRUE" +    **      "expr IS NULL"      -->  "FALSE" +    ** +    ** if we can prove that "expr" is never NULL.  Call this the +    ** "NOT NULL strength reduction optimization". +    ** +    ** If this optimization occurs, also restore the NameContext ref-counts +    ** to the state they were in before the "column" LHS expression was +    ** resolved.  This prevents "column" from being counted as having been +    ** referenced, which might prevent a SELECT from being erroneously +    ** marked as correlated. +    */ +    case TK_NOTNULL: +    case TK_ISNULL: { +      int anRef[8]; +      NameContext *p; +      int i; +      for(i=0, p=pNC; p && i<ArraySize(anRef); p=p->pNext, i++){ +        anRef[i] = p->nRef; +      } +      sqlite3WalkExpr(pWalker, pExpr->pLeft); +      if( 0==sqlite3ExprCanBeNull(pExpr->pLeft) && !IN_RENAME_OBJECT ){ +        if( pExpr->op==TK_NOTNULL ){ +          pExpr->u.zToken = "true"; +          ExprSetProperty(pExpr, EP_IsTrue); +        }else{ +          pExpr->u.zToken = "false"; +          ExprSetProperty(pExpr, EP_IsFalse); +        } +        pExpr->op = TK_TRUEFALSE; +        for(i=0, p=pNC; p && i<ArraySize(anRef); p=p->pNext, i++){ +          p->nRef = anRef[i]; +        } +        sqlite3ExprDelete(pParse->db, pExpr->pLeft); +        pExpr->pLeft = 0; +      } +      return WRC_Prune; +    } + /* A column name:                    ID ** Or table name and column name:    ID.ID ** Or a database, table and column:  ID.ID.ID @@ -99206,6 +99694,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ assert( pWin==pExpr->y.pWin ); if( IN_RENAME_OBJECT==0 ){ sqlite3WindowUpdate(pParse, pSel ? pSel->pWinDefn : 0, pWin, pDef); +          if( pParse->db->mallocFailed ) break; } sqlite3WalkExprList(pWalker, pWin->pPartition); sqlite3WalkExprList(pWalker, pWin->pOrderBy); @@ -99280,7 +99769,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ assert( !ExprHasProperty(pExpr, EP_Reduced) ); /* Handle special cases of "x IS TRUE", "x IS FALSE", "x IS NOT TRUE", ** and "x IS NOT FALSE".
*/ - if( pRight && (pRight->op==TK_ID || pRight->op==TK_TRUEFALSE) ){ + if( ALWAYS(pRight) && (pRight->op==TK_ID || pRight->op==TK_TRUEFALSE) ){ int rc = resolveExprStep(pWalker, pRight); if( rc==WRC_Abort ) return WRC_Abort; if( pRight->op==TK_TRUEFALSE ){ @@ -99596,8 +100085,7 @@ SQLITE_PRIVATE int sqlite3ResolveOrderGroupBy( resolveOutOfRangeError(pParse, zType, i+1, pEList->nExpr); return 1; } - resolveAlias(pParse, pEList, pItem->u.x.iOrderByCol-1, pItem->pExpr, - zType,0); + resolveAlias(pParse, pEList, pItem->u.x.iOrderByCol-1, pItem->pExpr,0); } } return 0; @@ -99782,27 +100270,26 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ /* Recursively resolve names in all subqueries */ for(i=0; i<p->pSrc->nSrc; i++){ - struct SrcList_item *pItem = &p->pSrc->a[i]; + SrcItem *pItem = &p->pSrc->a[i]; if( pItem->pSelect && (pItem->pSelect->selFlags & SF_Resolved)==0 ){ - NameContext *pNC; /* Used to iterate name contexts */ - int nRef = 0; /* Refcount for pOuterNC and outer contexts */ + int nRef = pOuterNC ? pOuterNC->nRef : 0; const char *zSavedContext = pParse->zAuthContext; - /* Count the total number of references to pOuterNC and all of its - ** parent contexts. After resolving references to expressions in - ** pItem->pSelect, check if this value has changed. If so, then - ** SELECT statement pItem->pSelect must be correlated. Set the - ** pItem->fg.isCorrelated flag if this is the case. */ - for(pNC=pOuterNC; pNC; pNC=pNC->pNext) nRef += pNC->nRef; - if( pItem->zName ) pParse->zAuthContext = pItem->zName; sqlite3ResolveSelectNames(pParse, pItem->pSelect, pOuterNC); pParse->zAuthContext = zSavedContext; if( pParse->nErr || db->mallocFailed ) return WRC_Abort; - for(pNC=pOuterNC; pNC; pNC=pNC->pNext) nRef -= pNC->nRef; - assert( pItem->fg.isCorrelated==0 && nRef<=0 ); - pItem->fg.isCorrelated = (nRef!=0); + /* If the number of references to the outer context changed when + ** expressions in the sub-select were resolved, the sub-select + ** is correlated. It is not required to check the refcount on any + ** but the innermost outer context object, as lookupName() increments + ** the refcount on all contexts between the current one and the + ** context containing the column when it resolves a name. */ + if( pOuterNC ){ + assert( pItem->fg.isCorrelated==0 && pOuterNC->nRef>=nRef ); + pItem->fg.isCorrelated = (pOuterNC->nRef>nRef); + } } } @@ -99844,7 +100331,7 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ ** Minor point: If this is the case, then the expression will be ** re-evaluated for each reference to it. 
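The reworked correlation test above no longer sums nRef across every enclosing NameContext: lookupName() increments each context between the reference and the match, so watching only the innermost outer context is sufficient. Stated as SQL (schema invented for illustration):

/* CREATE TABLE t(x);  CREATE TABLE u(y);
**
** SELECT * FROM t WHERE EXISTS (SELECT 1 FROM u WHERE u.y=t.x);
**   -- resolving t.x inside the subquery increments the outer
**   -- context's nRef, so fg.isCorrelated is set and the subquery
**   -- is re-evaluated for each row of t.
**
** SELECT * FROM t WHERE EXISTS (SELECT 1 FROM u WHERE u.y=5);
**   -- nRef is unchanged; the subquery is uncorrelated and can be
**   -- evaluated once and reused.
*/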
*/ - assert( (sNC.ncFlags & (NC_UAggInfo|NC_UUpsert))==0 ); + assert( (sNC.ncFlags & (NC_UAggInfo|NC_UUpsert|NC_UBaseReg))==0 ); sNC.uNC.pEList = p->pEList; sNC.ncFlags |= NC_UEList; if( sqlite3ResolveExprNames(&sNC, p->pHaving) ) return WRC_Abort; @@ -99852,7 +100339,7 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ /* Resolve names in table-valued-function arguments */ for(i=0; i<p->pSrc->nSrc; i++){ - struct SrcList_item *pItem = &p->pSrc->a[i]; + SrcItem *pItem = &p->pSrc->a[i]; if( pItem->fg.isTabFunc && sqlite3ResolveExprListNames(&sNC, pItem->u1.pFuncArg) ){ @@ -100261,7 +100748,18 @@ SQLITE_PRIVATE Expr *sqlite3ExprAddCollateToken( const Token *pCollName, /* Name of collating sequence */ int dequote /* True to dequote pCollName */ ){ - if( pCollName->n>0 ){ + assert( pExpr!=0 || pParse->db->mallocFailed ); + if( pExpr==0 ) return 0; + if( pExpr->op==TK_VECTOR ){ + ExprList *pList = pExpr->x.pList; + if( ALWAYS(pList!=0) ){ + int i; + for(i=0; i<pList->nExpr; i++){ + pList->a[i].pExpr = sqlite3ExprAddCollateToken(pParse,pList->a[i].pExpr, + pCollName, dequote); + } + } + }else if( pCollName->n>0 ){ Expr *pNew = sqlite3ExprAlloc(pParse->db, TK_COLLATE, pCollName, dequote); if( pNew ){ pNew->pLeft = pExpr; @@ -101113,8 +101611,8 @@ SQLITE_PRIVATE Expr *sqlite3ExprAnd(Parse *pParse, Expr *pLeft, Expr *pRight){ }else if( (ExprAlwaysFalse(pLeft) || ExprAlwaysFalse(pRight)) && !IN_RENAME_OBJECT ){ - sqlite3ExprDelete(db, pLeft); - sqlite3ExprDelete(db, pRight); + sqlite3ExprDeferredDelete(pParse, pLeft); + sqlite3ExprDeferredDelete(pParse, pRight); return sqlite3Expr(db, TK_INTEGER, "0"); }else{ return sqlite3PExpr(pParse, TK_AND, pLeft, pRight); @@ -101311,6 +101809,22 @@ SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3 *db, Expr *p){ if( p ) sqlite3ExprDeleteNN(db, p); } + +/* +** Arrange to cause pExpr to be deleted when the pParse is deleted. +** This is similar to sqlite3ExprDelete() except that the delete is +** deferred untilthe pParse is deleted. +** +** The pExpr might be deleted immediately on an OOM error. +** +** The deferred delete is (currently) implemented by adding the +** pExpr to the pParse->pConstExpr list with a register number of 0. +*/ +SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse *pParse, Expr *pExpr){ + pParse->pConstExpr = + sqlite3ExprListAppend(pParse, pParse->pConstExpr, pExpr); +} + /* Invoke sqlite3RenameExprUnmap() and sqlite3ExprDelete() on the ** expression. */ @@ -101685,8 +102199,8 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, SrcList *p, int flags){ if( pNew==0 ) return 0; pNew->nSrc = pNew->nAlloc = p->nSrc; for(i=0; i<p->nSrc; i++){ - struct SrcList_item *pNewItem = &pNew->a[i]; - struct SrcList_item *pOldItem = &p->a[i]; + SrcItem *pNewItem = &pNew->a[i]; + SrcItem *pOldItem = &p->a[i]; Table *pTab; pNewItem->pSchema = pOldItem->pSchema; pNewItem->zDatabase = sqlite3DbStrDup(db, pOldItem->zDatabase); @@ -101699,7 +102213,10 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, SrcList *p, int flags){ if( pNewItem->fg.isIndexedBy ){ pNewItem->u1.zIndexedBy = sqlite3DbStrDup(db, pOldItem->u1.zIndexedBy); } - pNewItem->pIBIndex = pOldItem->pIBIndex; + pNewItem->u2 = pOldItem->u2; + if( pNewItem->fg.isCte ){ + pNewItem->u2.pCteUse->nUse++; + } if( pNewItem->fg.isTabFunc ){ pNewItem->u1.pFuncArg = sqlite3ExprListDup(db, pOldItem->u1.pFuncArg, flags); @@ -102737,7 +103254,7 @@ SQLITE_PRIVATE int sqlite3FindInIndex( /* Code an OP_Transaction and OP_TableLock for <table>. 
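sqlite3ExprDeferredDelete() above parks an expression on pParse->pConstExpr so that it is reclaimed together with the Parse object rather than freed while something may still point at it. A self-contained sketch of the same deferred-free pattern, using simplified stand-in types rather than SQLite's:

#include <stdlib.h>

typedef struct Deferred Deferred;
struct Deferred { void *p; Deferred *pNext; };
typedef struct ParseCtx { Deferred *pCleanup; } ParseCtx;

/* Keep p alive until parseFinish(). On OOM the object is freed
** immediately, mirroring the "might be deleted immediately on an
** OOM error" caveat in the comment above. */
static void deferFree(ParseCtx *pParse, void *p){
  Deferred *d = malloc(sizeof(*d));
  if( d==0 ){ free(p); return; }
  d->p = p;
  d->pNext = pParse->pCleanup;
  pParse->pCleanup = d;
}

static void parseFinish(ParseCtx *pParse){
  Deferred *d, *dNext;
  for(d=pParse->pCleanup; d; d=dNext){
    dNext = d->pNext;
    free(d->p);
    free(d);
  }
  pParse->pCleanup = 0;
}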
*/ iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - assert( iDb>=0 && iDb<SQLITE_MAX_ATTACHED ); + assert( iDb>=0 && iDb<SQLITE_MAX_DB ); sqlite3CodeVerifySchema(pParse, iDb); sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); @@ -105933,8 +106450,7 @@ static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){ pExpr = sqlite3ExprDup(db, pExpr, 0); if( pExpr ){ pAggInfo->aCol[iAgg].pCExpr = pExpr; - pParse->pConstExpr = - sqlite3ExprListAppend(pParse, pParse->pConstExpr, pExpr); + sqlite3ExprDeferredDelete(pParse, pExpr); } } }else{ @@ -105943,8 +106459,7 @@ static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){ pExpr = sqlite3ExprDup(db, pExpr, 0); if( pExpr ){ pAggInfo->aFunc[iAgg].pFExpr = pExpr; - pParse->pConstExpr = - sqlite3ExprListAppend(pParse, pParse->pConstExpr, pExpr); + sqlite3ExprDeferredDelete(pParse, pExpr); } } } @@ -106016,7 +106531,7 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){ /* Check to see if the column is in one of the tables in the FROM ** clause of the aggregate query */ if( ALWAYS(pSrcList!=0) ){ - struct SrcList_item *pItem = pSrcList->a; + SrcItem *pItem = pSrcList->a; for(i=0; i<pSrcList->nSrc; i++, pItem++){ struct AggInfo_col *pCol; assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) ); @@ -106305,15 +106820,22 @@ static int isAlterableTable(Parse *pParse, Table *pTab){ ** statement to ensure that the operation has not rendered any schema ** objects unusable. */ -static void renameTestSchema(Parse *pParse, const char *zDb, int bTemp){ +static void renameTestSchema( + Parse *pParse, /* Parse context */ + const char *zDb, /* Name of db to verify schema of */ + int bTemp, /* True if this is the temp db */ + const char *zWhen, /* "when" part of error message */ + const char *zDropColumn /* Name of column being dropped */ +){ + pParse->colNamesSet = 1; sqlite3NestedParse(pParse, "SELECT 1 " "FROM \"%w\"." DFLT_SCHEMA_TABLE " " "WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X'" " AND sql NOT LIKE 'create virtual%%'" - " AND sqlite_rename_test(%Q, sql, type, name, %d)=NULL ", + " AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %Q)=NULL ", zDb, - zDb, bTemp + zDb, bTemp, zWhen, zDropColumn ); if( bTemp==0 ){ @@ -106322,8 +106844,8 @@ static void renameTestSchema(Parse *pParse, const char *zDb, int bTemp){ "FROM temp." DFLT_SCHEMA_TABLE " " "WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X'" " AND sql NOT LIKE 'create virtual%%'" - " AND sqlite_rename_test(%Q, sql, type, name, 1)=NULL ", - zDb + " AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %Q)=NULL ", + zDb, zWhen, zDropColumn ); } } @@ -106332,12 +106854,12 @@ static void renameTestSchema(Parse *pParse, const char *zDb, int bTemp){ ** Generate code to reload the schema for database iDb. And, if iDb!=1, for ** the temp database as well. 
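renameTestSchema() above re-parses every non-virtual schema entry through sqlite_rename_test() after the edit, so an ALTER that would break a dependent view or trigger fails cleanly instead of committing a corrupt schema; the added zWhen and zDropColumn arguments let the same machinery phrase its errors for both RENAME and the new DROP COLUMN. The failure mode looks roughly like this (schema and exact message wording are illustrative):

/* CREATE TABLE t(a, b);
** CREATE VIEW v AS SELECT b FROM t;
**
** ALTER TABLE t DROP COLUMN b;
**   -> error in view v after drop column: no such column: b
**
** The nested schema UPDATE is rolled back and the schema reloads
** unchanged. */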
*/ -static void renameReloadSchema(Parse *pParse, int iDb){ +static void renameReloadSchema(Parse *pParse, int iDb, u16 p5){ Vdbe *v = pParse->pVdbe; if( v ){ sqlite3ChangeCookie(pParse, iDb); - sqlite3VdbeAddParseSchemaOp(pParse->pVdbe, iDb, 0); - if( iDb!=1 ) sqlite3VdbeAddParseSchemaOp(pParse->pVdbe, 1, 0); + sqlite3VdbeAddParseSchemaOp(pParse->pVdbe, iDb, 0, p5); + if( iDb!=1 ) sqlite3VdbeAddParseSchemaOp(pParse->pVdbe, 1, 0, p5); } } @@ -106486,7 +107008,7 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable( "sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), " "tbl_name = " "CASE WHEN tbl_name=%Q COLLATE nocase AND " - " sqlite_rename_test(%Q, sql, type, name, 1) " + " sqlite_rename_test(%Q, sql, type, name, 1, 'after rename',0) " "THEN %Q ELSE tbl_name END " "WHERE type IN ('view', 'trigger')" , zDb, zTabName, zName, zTabName, zDb, zName); @@ -106505,8 +107027,8 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable( } #endif - renameReloadSchema(pParse, iDb); - renameTestSchema(pParse, zDb, iDb==1); + renameReloadSchema(pParse, iDb, INITFLAG_AlterRename); + renameTestSchema(pParse, zDb, iDb==1, "after rename", 0); exit_rename_table: sqlite3SrcListDelete(db, pSrc); @@ -106637,11 +107159,14 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){ *zEnd-- = '\0'; } db->mDbFlags |= DBFLAG_PreferBuiltin; + /* substr() operations on characters, but addColOffset is in bytes. So we + ** have to use printf() to translate between these units: */ sqlite3NestedParse(pParse, "UPDATE \"%w\"." DFLT_SCHEMA_TABLE " SET " - "sql = substr(sql,1,%d) || ', ' || %Q || substr(sql,%d) " + "sql = printf('%%.%ds, ',sql) || %Q" + " || substr(sql,1+length(printf('%%.%ds',sql))) " "WHERE type = 'table' AND name = %Q", - zDb, pNew->addColOffset, zCol, pNew->addColOffset+1, + zDb, pNew->addColOffset, zCol, pNew->addColOffset, zTab ); sqlite3DbFree(db, zCol); @@ -106665,7 +107190,7 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){ } /* Reload the table definition */ - renameReloadSchema(pParse, iDb); + renameReloadSchema(pParse, iDb, INITFLAG_AlterRename); } /* @@ -106765,7 +107290,7 @@ exit_begin_add_column: ** Or, if pTab is not a view or virtual table, zero is returned. */ #if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) -static int isRealTable(Parse *pParse, Table *pTab){ +static int isRealTable(Parse *pParse, Table *pTab, int bDrop){ const char *zType = 0; #ifndef SQLITE_OMIT_VIEW if( pTab->pSelect ){ @@ -106778,15 +107303,16 @@ static int isRealTable(Parse *pParse, Table *pTab){ } #endif if( zType ){ - sqlite3ErrorMsg( - pParse, "cannot rename columns of %s \"%s\"", zType, pTab->zName + sqlite3ErrorMsg(pParse, "cannot %s %s \"%s\"", + (bDrop ? "drop column from" : "rename columns of"), + zType, pTab->zName ); return 1; } return 0; } #else /* !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) */ -# define isRealTable(x,y) (0) +# define isRealTable(x,y,z) (0) #endif /* @@ -106815,7 +107341,7 @@ SQLITE_PRIVATE void sqlite3AlterRenameColumn( /* Cannot alter a system table */ if( SQLITE_OK!=isAlterableTable(pParse, pTab) ) goto exit_rename_column; - if( SQLITE_OK!=isRealTable(pParse, pTab) ) goto exit_rename_column; + if( SQLITE_OK!=isRealTable(pParse, pTab, 0) ) goto exit_rename_column; /* Which schema holds the table to be altered */ iSchema = sqlite3SchemaToIndex(db, pTab->pSchema); @@ -106869,8 +107395,8 @@ SQLITE_PRIVATE void sqlite3AlterRenameColumn( ); /* Drop and reload the database schema. 
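The substr()-to-printf() change in sqlite3AlterFinishAddColumn() above matters for non-ASCII identifiers: SQL substr() counts characters, while Table.addColOffset is a byte offset into the CREATE TABLE text, and printf('%.Ns', sql) truncates after N bytes. A standalone illustration of the unit mismatch (plain C, UTF-8 input):

#include <stdio.h>
#include <string.h>

int main(void){
  /* "é" is one character but two bytes in UTF-8. */
  const char *zSql = "CREATE TABLE caf\xC3\xA9(a)";
  int addColOffset = (int)strlen(zSql) - 1;  /* byte offset of the final ')' */

  /* %.*s truncates after addColOffset *bytes*, the unit we actually have: */
  printf("%.*s, b)\n", addColOffset, zSql);
  /* -> CREATE TABLE café(a, b)
  ** A character-counting substr(sql,1,addColOffset) would have kept one
  ** character too many here, leaving a stray ')' before the new column. */
  return 0;
}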
*/ - renameReloadSchema(pParse, iSchema); - renameTestSchema(pParse, zDb, iSchema==1); + renameReloadSchema(pParse, iSchema, INITFLAG_AlterRename); + renameTestSchema(pParse, zDb, iSchema==1, "after rename", 0); exit_rename_column: sqlite3SrcListDelete(db, pSrc); @@ -107122,23 +107648,33 @@ static void renameTokenFree(sqlite3 *db, RenameToken *pToken){ /* ** Search the Parse object passed as the first argument for a RenameToken -** object associated with parse tree element pPtr. If found, remove it -** from the Parse object and add it to the list maintained by the -** RenameCtx object passed as the second argument. +** object associated with parse tree element pPtr. If found, return a pointer +** to it. Otherwise, return NULL. +** +** If the second argument passed to this function is not NULL and a matching +** RenameToken object is found, remove it from the Parse object and add it to +** the list maintained by the RenameCtx object. */ -static void renameTokenFind(Parse *pParse, struct RenameCtx *pCtx, void *pPtr){ +static RenameToken *renameTokenFind( + Parse *pParse, + struct RenameCtx *pCtx, + void *pPtr +){ RenameToken **pp; assert( pPtr!=0 ); for(pp=&pParse->pRename; (*pp); pp=&(*pp)->pNext){ if( (*pp)->p==pPtr ){ RenameToken *pToken = *pp; - *pp = pToken->pNext; - pToken->pNext = pCtx->pList; - pCtx->pList = pToken; - pCtx->nList++; - break; + if( pCtx ){ + *pp = pToken->pNext; + pToken->pNext = pCtx->pList; + pCtx->pList = pToken; + pCtx->nList++; + } + return pToken; } } + return 0; } /* @@ -107209,7 +107745,7 @@ static RenameToken *renameColumnTokenNext(RenameCtx *pCtx){ */ static void renameColumnParseError( sqlite3_context *pCtx, - int bPost, + const char *zWhen, sqlite3_value *pType, sqlite3_value *pObject, Parse *pParse @@ -107218,8 +107754,8 @@ static void renameColumnParseError( const char *zN = (const char*)sqlite3_value_text(pObject); char *zErr; - zErr = sqlite3_mprintf("error in %s %s%s: %s", - zT, zN, (bPost ? " after rename" : ""), + zErr = sqlite3_mprintf("error in %s %s%s%s: %s", + zT, zN, (zWhen[0] ? " " : ""), zWhen, pParse->zErrMsg ); sqlite3_result_error(pCtx, zErr, -1); @@ -107284,12 +107820,17 @@ static int renameParseSql( const char *zDb, /* Name of schema SQL belongs to */ sqlite3 *db, /* Database handle */ const char *zSql, /* SQL to parse */ - int bTemp /* True if SQL is from temp schema */ + int bTemp, /* True if SQL is from temp schema */ + const char *zDropColumn /* Name of column being dropped */ ){ int rc; char *zErr = 0; db->init.iDb = bTemp ? 1 : sqlite3FindDbName(db, zDb); + if( zDropColumn ){ + db->init.bDropColumn = 1; + db->init.azInit = (char**)&zDropColumn; + } /* Parse the SQL statement passed as the first argument. If no error ** occurs and the parse does not result in a new table, index or @@ -107298,7 +107839,7 @@ static int renameParseSql( p->eParseMode = PARSE_MODE_RENAME; p->db = db; p->nQueryLoop = 1; - rc = sqlite3RunParser(p, zSql, &zErr); + rc = zSql ? 
sqlite3RunParser(p, zSql, &zErr) : SQLITE_NOMEM; assert( p->zErrMsg==0 ); assert( rc!=SQLITE_OK || zErr==0 ); p->zErrMsg = zErr; @@ -107322,6 +107863,7 @@ static int renameParseSql( #endif db->init.iDb = 0; + db->init.bDropColumn = 0; return rc; } @@ -107451,7 +107993,7 @@ static int renameResolveTrigger(Parse *pParse){ if( pSrc ){ int i; for(i=0; i<pSrc->nSrc && rc==SQLITE_OK; i++){ - struct SrcList_item *p = &pSrc->a[i]; + SrcItem *p = &pSrc->a[i]; p->iCursor = pParse->nTab++; if( p->pSelect ){ sqlite3SelectPrep(pParse, p->pSelect, 0); @@ -107477,9 +108019,8 @@ static int renameResolveTrigger(Parse *pParse){ rc = sqlite3ResolveExprListNames(&sNC, pStep->pExprList); } assert( !pStep->pUpsert || (!pStep->pWhere && !pStep->pExprList) ); - if( pStep->pUpsert ){ + if( pStep->pUpsert && rc==SQLITE_OK ){ Upsert *pUpsert = pStep->pUpsert; - assert( rc==SQLITE_OK ); pUpsert->pUpsertSrc = pSrc; sNC.uNC.pUpsert = pUpsert; sNC.ncFlags = NC_UUpsert; @@ -107624,7 +108165,7 @@ static void renameColumnFunc( #ifndef SQLITE_OMIT_AUTHORIZATION db->xAuth = 0; #endif - rc = renameParseSql(&sParse, zDb, db, zSql, bTemp); + rc = renameParseSql(&sParse, zDb, db, zSql, bTemp, 0); /* Find tokens that need to be replaced. */ memset(&sWalker, 0, sizeof(Walker)); @@ -107666,12 +108207,12 @@ static void renameColumnFunc( for(pIdx=sParse.pNewIndex; pIdx; pIdx=pIdx->pNext){ sqlite3WalkExprList(&sWalker, pIdx->aColExpr); } - } #ifndef SQLITE_OMIT_GENERATED_COLUMNS - for(i=0; i<sParse.pNewTable->nCol; i++){ - sqlite3WalkExpr(&sWalker, sParse.pNewTable->aCol[i].pDflt); - } + for(i=0; i<sParse.pNewTable->nCol; i++){ + sqlite3WalkExpr(&sWalker, sParse.pNewTable->aCol[i].pDflt); + } #endif + } for(pFKey=sParse.pNewTable->pFKey; pFKey; pFKey=pFKey->pNextFrom){ for(i=0; i<pFKey->nCol; i++){ @@ -107725,7 +108266,7 @@ static void renameColumnFunc( renameColumnFunc_done: if( rc!=SQLITE_OK ){ if( sParse.zErrMsg ){ - renameColumnParseError(context, 0, argv[1], argv[2], &sParse); + renameColumnParseError(context, "", argv[1], argv[2], &sParse); }else{ sqlite3_result_error_code(context, rc); } @@ -107763,7 +108304,7 @@ static int renameTableSelectCb(Walker *pWalker, Select *pSelect){ return WRC_Abort; } for(i=0; i<pSrc->nSrc; i++){ - struct SrcList_item *pItem = &pSrc->a[i]; + SrcItem *pItem = &pSrc->a[i]; if( pItem->pTab==p->pTab ){ renameTokenFind(pWalker->pParse, p, pItem->zName); } @@ -107828,7 +108369,7 @@ static void renameTableFunc( sWalker.xSelectCallback = renameTableSelectCb; sWalker.u.pRename = &sCtx; - rc = renameParseSql(&sParse, zDb, db, zInput, bTemp); + rc = renameParseSql(&sParse, zDb, db, zInput, bTemp, 0); if( rc==SQLITE_OK ){ int isLegacy = (db->flags & SQLITE_LegacyAlter); @@ -107914,7 +108455,7 @@ static void renameTableFunc( } if( rc!=SQLITE_OK ){ if( sParse.zErrMsg ){ - renameColumnParseError(context, 0, argv[1], argv[2], &sParse); + renameColumnParseError(context, "", argv[1], argv[2], &sParse); }else{ sqlite3_result_error_code(context, rc); } @@ -107943,6 +108484,8 @@ static void renameTableFunc( ** 2: Object type ("view", "table", "trigger" or "index"). ** 3: Object name. ** 4: True if object is from temp schema. +** 5: "when" part of error message. +** 6: Name of column being dropped, or NULL. ** ** Unless it finds an error, this function normally returns NULL. 
However, it ** returns integer value 1 if: @@ -107960,6 +108503,8 @@ static void renameTableTest( char const *zInput = (const char*)sqlite3_value_text(argv[1]); int bTemp = sqlite3_value_int(argv[4]); int isLegacy = (db->flags & SQLITE_LegacyAlter); + char const *zWhen = (const char*)sqlite3_value_text(argv[5]); + char const *zDropColumn = (const char*)sqlite3_value_text(argv[6]); #ifndef SQLITE_OMIT_AUTHORIZATION sqlite3_xauth xAuth = db->xAuth; @@ -107970,7 +108515,7 @@ static void renameTableTest( if( zDb && zInput ){ int rc; Parse sParse; - rc = renameParseSql(&sParse, zDb, db, zInput, bTemp); + rc = renameParseSql(&sParse, zDb, db, zInput, bTemp, zDropColumn); if( rc==SQLITE_OK ){ if( isLegacy==0 && sParse.pNewTable && sParse.pNewTable->pSelect ){ NameContext sNC; @@ -107992,8 +108537,8 @@ static void renameTableTest( } } - if( rc!=SQLITE_OK ){ - renameColumnParseError(context, 1, argv[2], argv[3], &sParse); + if( rc!=SQLITE_OK && zWhen ){ + renameColumnParseError(context, zWhen, argv[2], argv[3],&sParse); } renameParseCleanup(&sParse); } @@ -108003,14 +108548,206 @@ static void renameTableTest( #endif } +/* +** The implementation of internal UDF sqlite_drop_column(). +** +** Arguments: +** +** argv[0]: An integer - the index of the schema containing the table +** argv[1]: CREATE TABLE statement to modify. +** argv[2]: An integer - the index of the column to remove. +** +** The value returned is a string containing the CREATE TABLE statement +** with column argv[2] removed. +*/ +static void dropColumnFunc( + sqlite3_context *context, + int NotUsed, + sqlite3_value **argv +){ + sqlite3 *db = sqlite3_context_db_handle(context); + int iSchema = sqlite3_value_int(argv[0]); + const char *zSql = (const char*)sqlite3_value_text(argv[1]); + int iCol = sqlite3_value_int(argv[2]); + const char *zDb = db->aDb[iSchema].zDbSName; + int rc; + Parse sParse; + RenameToken *pCol; + Table *pTab; + const char *zEnd; + char *zNew = 0; + +#ifndef SQLITE_OMIT_AUTHORIZATION + sqlite3_xauth xAuth = db->xAuth; + db->xAuth = 0; +#endif + + UNUSED_PARAMETER(NotUsed); + rc = renameParseSql(&sParse, zDb, db, zSql, iSchema==1, 0); + if( rc!=SQLITE_OK ) goto drop_column_done; + pTab = sParse.pNewTable; + if( pTab==0 || pTab->nCol==1 || iCol>=pTab->nCol ){ + /* This can happen if the sqlite_schema table is corrupt */ + rc = SQLITE_CORRUPT_BKPT; + goto drop_column_done; + } + + pCol = renameTokenFind(&sParse, 0, (void*)pTab->aCol[iCol].zName); + if( iCol<pTab->nCol-1 ){ + RenameToken *pEnd; + pEnd = renameTokenFind(&sParse, 0, (void*)pTab->aCol[iCol+1].zName); + zEnd = (const char*)pEnd->t.z; + }else{ + zEnd = (const char*)&zSql[pTab->addColOffset]; + while( ALWAYS(pCol->t.z[0]!=0) && pCol->t.z[0]!=',' ) pCol->t.z--; + } + + zNew = sqlite3MPrintf(db, "%.*s%s", pCol->t.z-zSql, zSql, zEnd); + sqlite3_result_text(context, zNew, -1, SQLITE_TRANSIENT); + sqlite3_free(zNew); + +drop_column_done: + renameParseCleanup(&sParse); +#ifndef SQLITE_OMIT_AUTHORIZATION + db->xAuth = xAuth; +#endif + if( rc!=SQLITE_OK ){ + sqlite3_result_error_code(context, rc); + } +} + +/* +** This function is called by the parser upon parsing an +** +** ALTER TABLE pSrc DROP COLUMN pName +** +** statement. Argument pSrc contains the possibly qualified name of the +** table being edited, and token pName the name of the column to drop. 
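Together, dropColumnFunc() above and sqlite3AlterDropColumn() below implement the new ALTER TABLE ... DROP COLUMN statement: the schema row is rewritten with the column's definition cut out, the schema is reloaded and re-verified, and the surviving rows are rewritten without the dropped field. From the public API the feature needs nothing special (sketch; error handling elided):

#include "sqlite3.h"

int dropColumnExample(sqlite3 *db){
  int rc = sqlite3_exec(db, "CREATE TABLE t(a, b, c);", 0, 0, 0);
  if( rc==SQLITE_OK ){
    rc = sqlite3_exec(db, "ALTER TABLE t DROP COLUMN b;", 0, 0, 0);
  }
  /* Dropping a PRIMARY KEY or UNIQUE column, the only remaining column,
  ** or a column of a view or virtual table is rejected with one of the
  ** error messages generated in sqlite3AlterDropColumn() below. */
  return rc;
}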
+*/ +SQLITE_PRIVATE void sqlite3AlterDropColumn(Parse *pParse, SrcList *pSrc, Token *pName){ + sqlite3 *db = pParse->db; /* Database handle */ + Table *pTab; /* Table to modify */ + int iDb; /* Index of db containing pTab in aDb[] */ + const char *zDb; /* Database containing pTab ("main" etc.) */ + char *zCol = 0; /* Name of column to drop */ + int iCol; /* Index of column zCol in pTab->aCol[] */ + + /* Look up the table being altered. */ + assert( pParse->pNewTable==0 ); + assert( sqlite3BtreeHoldsAllMutexes(db) ); + if( NEVER(db->mallocFailed) ) goto exit_drop_column; + pTab = sqlite3LocateTableItem(pParse, 0, &pSrc->a[0]); + if( !pTab ) goto exit_drop_column; + + /* Make sure this is not an attempt to ALTER a view, virtual table or + ** system table. */ + if( SQLITE_OK!=isAlterableTable(pParse, pTab) ) goto exit_drop_column; + if( SQLITE_OK!=isRealTable(pParse, pTab, 1) ) goto exit_drop_column; + + /* Find the index of the column being dropped. */ + zCol = sqlite3NameFromToken(db, pName); + if( zCol==0 ){ + assert( db->mallocFailed ); + goto exit_drop_column; + } + iCol = sqlite3ColumnIndex(pTab, zCol); + if( iCol<0 ){ + sqlite3ErrorMsg(pParse, "no such column: \"%s\"", zCol); + goto exit_drop_column; + } + + /* Do not allow the user to drop a PRIMARY KEY column or a column + ** constrained by a UNIQUE constraint. */ + if( pTab->aCol[iCol].colFlags & (COLFLAG_PRIMKEY|COLFLAG_UNIQUE) ){ + sqlite3ErrorMsg(pParse, "cannot drop %s column: \"%s\"", + (pTab->aCol[iCol].colFlags&COLFLAG_PRIMKEY) ? "PRIMARY KEY" : "UNIQUE", + zCol + ); + goto exit_drop_column; + } + + /* Do not allow the number of columns to go to zero */ + if( pTab->nCol<=1 ){ + sqlite3ErrorMsg(pParse, "cannot drop column \"%s\": no other columns exist",zCol); + goto exit_drop_column; + } + + /* Edit the sqlite_schema table */ + iDb = sqlite3SchemaToIndex(db, pTab->pSchema); + assert( iDb>=0 ); + zDb = db->aDb[iDb].zDbSName; + renameTestSchema(pParse, zDb, iDb==1, "", 0); + sqlite3NestedParse(pParse, + "UPDATE \"%w\"." DFLT_SCHEMA_TABLE " SET " + "sql = sqlite_drop_column(%d, sql, %d) " + "WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)" + , zDb, iDb, iCol, pTab->zName + ); + + /* Drop and reload the database schema. 
*/ + renameReloadSchema(pParse, iDb, INITFLAG_AlterDrop); + renameTestSchema(pParse, zDb, iDb==1, "after drop column", zCol); + + /* Edit rows of table on disk */ + if( pParse->nErr==0 && (pTab->aCol[iCol].colFlags & COLFLAG_VIRTUAL)==0 ){ + int i; + int addr; + int reg; + int regRec; + Index *pPk = 0; + int nField = 0; /* Number of non-virtual columns after drop */ + int iCur; + Vdbe *v = sqlite3GetVdbe(pParse); + iCur = pParse->nTab++; + sqlite3OpenTable(pParse, iCur, iDb, pTab, OP_OpenWrite); + addr = sqlite3VdbeAddOp1(v, OP_Rewind, iCur); VdbeCoverage(v); + reg = ++pParse->nMem; + pParse->nMem += pTab->nCol; + if( HasRowid(pTab) ){ + sqlite3VdbeAddOp2(v, OP_Rowid, iCur, reg); + }else{ + pPk = sqlite3PrimaryKeyIndex(pTab); + } + for(i=0; i<pTab->nCol; i++){ + if( i!=iCol && (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0 ){ + int regOut; + if( pPk ){ + int iPos = sqlite3TableColumnToIndex(pPk, i); + int iColPos = sqlite3TableColumnToIndex(pPk, iCol); + regOut = reg+1+iPos-(iPos>iColPos); + }else{ + regOut = reg+1+nField; + } + sqlite3ExprCodeGetColumnOfTable(v, pTab, iCur, i, regOut); + nField++; + } + } + regRec = reg + pTab->nCol; + sqlite3VdbeAddOp3(v, OP_MakeRecord, reg+1, nField, regRec); + if( pPk ){ + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iCur, regRec, reg+1, pPk->nKeyCol); + }else{ + sqlite3VdbeAddOp3(v, OP_Insert, iCur, regRec, reg); + } + + sqlite3VdbeAddOp2(v, OP_Next, iCur, addr+1); VdbeCoverage(v); + sqlite3VdbeJumpHere(v, addr); + } + +exit_drop_column: + sqlite3DbFree(db, zCol); + sqlite3SrcListDelete(db, pSrc); +} + /* ** Register built-in functions used to help implement ALTER TABLE */ SQLITE_PRIVATE void sqlite3AlterFunctions(void){ static FuncDef aAlterTableFuncs[] = { - INTERNAL_FUNCTION(sqlite_rename_column, 9, renameColumnFunc), - INTERNAL_FUNCTION(sqlite_rename_table, 7, renameTableFunc), - INTERNAL_FUNCTION(sqlite_rename_test, 5, renameTableTest), + INTERNAL_FUNCTION(sqlite_rename_column, 9, renameColumnFunc), + INTERNAL_FUNCTION(sqlite_rename_table, 7, renameTableFunc), + INTERNAL_FUNCTION(sqlite_rename_test, 7, renameTableTest), + INTERNAL_FUNCTION(sqlite_drop_column, 3, dropColumnFunc), }; sqlite3InsertBuiltinFuncs(aAlterTableFuncs, ArraySize(aAlterTableFuncs)); } @@ -110399,6 +111136,62 @@ SQLITE_PRIVATE void sqlite3Attach(Parse *pParse, Expr *p, Expr *pDbname, Expr *p } #endif /* SQLITE_OMIT_ATTACH */ +/* +** Expression callback used by sqlite3FixAAAA() routines. +*/ +static int fixExprCb(Walker *p, Expr *pExpr){ + DbFixer *pFix = p->u.pFix; + if( !pFix->bTemp ) ExprSetProperty(pExpr, EP_FromDDL); + if( pExpr->op==TK_VARIABLE ){ + if( pFix->pParse->db->init.busy ){ + pExpr->op = TK_NULL; + }else{ + sqlite3ErrorMsg(pFix->pParse, "%s cannot use variables", pFix->zType); + return WRC_Abort; + } + } + return WRC_Continue; +} + +/* +** Select callback used by sqlite3FixAAAA() routines. 
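This refactor replaces the hand-written recursions in sqlite3FixExpr(), sqlite3FixExprList() and friends with the generic Walker used throughout the parser, reducing the DbFixer to the two callbacks above and below. The callback-walker pattern in isolation (types stripped down for exposition; not the real Walker definition):

/* A minimal expression walker: one callback visits every node. */
typedef struct ExprNode ExprNode;
struct ExprNode { int op; ExprNode *pLeft, *pRight; };

typedef struct MiniWalker MiniWalker;
struct MiniWalker {
  int (*xExprCallback)(MiniWalker*, ExprNode*); /* 0=continue, 1=abort */
  void *pCtx;                                   /* caller state, e.g. a fixer */
};

static int walkExpr(MiniWalker *pW, ExprNode *p){
  while( p ){
    if( pW->xExprCallback(pW, p) ) return 1;
    if( p->pRight && walkExpr(pW, p->pRight) ) return 1;
    p = p->pLeft;  /* iterate down the left spine instead of recursing */
  }
  return 0;
}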
+*/ +static int fixSelectCb(Walker *p, Select *pSelect){ + DbFixer *pFix = p->u.pFix; + int i; + SrcItem *pItem; + sqlite3 *db = pFix->pParse->db; + int iDb = sqlite3FindDbName(db, pFix->zDb); + SrcList *pList = pSelect->pSrc; + + if( NEVER(pList==0) ) return WRC_Continue; + for(i=0, pItem=pList->a; i<pList->nSrc; i++, pItem++){ + if( pFix->bTemp==0 ){ + if( pItem->zDatabase && iDb!=sqlite3FindDbName(db, pItem->zDatabase) ){ + sqlite3ErrorMsg(pFix->pParse, + "%s %T cannot reference objects in database %s", + pFix->zType, pFix->pName, pItem->zDatabase); + return WRC_Abort; + } + sqlite3DbFree(db, pItem->zDatabase); + pItem->zDatabase = 0; + pItem->pSchema = pFix->pSchema; + pItem->fg.fromDDL = 1; + } +#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) + if( sqlite3WalkExpr(&pFix->w, pList->a[i].pOn) ) return WRC_Abort; +#endif + } + if( pSelect->pWith ){ + for(i=0; i<pSelect->pWith->nCte; i++){ + if( sqlite3WalkSelect(p, pSelect->pWith->a[i].pSelect) ){ + return WRC_Abort; + } + } + } + return WRC_Continue; +} + /* ** Initialize a DbFixer structure. This routine must be called prior ** to passing the structure to one of the sqliteFixAAAA() routines below. @@ -110410,9 +111203,7 @@ SQLITE_PRIVATE void sqlite3FixInit( const char *zType, /* "view", "trigger", or "index" */ const Token *pName /* Name of the view, trigger, or index */ ){ - sqlite3 *db; - - db = pParse->db; + sqlite3 *db = pParse->db; assert( db->nDb>iDb ); pFix->pParse = pParse; pFix->zDb = db->aDb[iDb].zDbSName; @@ -110420,6 +111211,13 @@ SQLITE_PRIVATE void sqlite3FixInit( pFix->zType = zType; pFix->pName = pName; pFix->bTemp = (iDb==1); + pFix->w.pParse = pParse; + pFix->w.xExprCallback = fixExprCb; + pFix->w.xSelectCallback = fixSelectCb; + pFix->w.xSelectCallback2 = 0; + pFix->w.walkerDepth = 0; + pFix->w.eCode = 0; + pFix->w.u.pFix = pFix; } /* @@ -110440,115 +111238,27 @@ SQLITE_PRIVATE int sqlite3FixSrcList( DbFixer *pFix, /* Context of the fixation */ SrcList *pList /* The Source list to check and modify */ ){ - int i; - struct SrcList_item *pItem; - sqlite3 *db = pFix->pParse->db; - int iDb = sqlite3FindDbName(db, pFix->zDb); - - if( NEVER(pList==0) ) return 0; - - for(i=0, pItem=pList->a; i<pList->nSrc; i++, pItem++){ - if( pFix->bTemp==0 ){ - if( pItem->zDatabase && iDb!=sqlite3FindDbName(db, pItem->zDatabase) ){ - sqlite3ErrorMsg(pFix->pParse, - "%s %T cannot reference objects in database %s", - pFix->zType, pFix->pName, pItem->zDatabase); - return 1; - } - sqlite3DbFree(db, pItem->zDatabase); - pItem->zDatabase = 0; - pItem->pSchema = pFix->pSchema; - pItem->fg.fromDDL = 1; - } -#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) - if( sqlite3FixSelect(pFix, pItem->pSelect) ) return 1; - if( sqlite3FixExpr(pFix, pItem->pOn) ) return 1; -#endif - if( pItem->fg.isTabFunc && sqlite3FixExprList(pFix, pItem->u1.pFuncArg) ){ - return 1; - } + int res = 0; + if( pList ){ + Select s; + memset(&s, 0, sizeof(s)); + s.pSrc = pList; + res = sqlite3WalkSelect(&pFix->w, &s); } - return 0; + return res; } #if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) SQLITE_PRIVATE int sqlite3FixSelect( DbFixer *pFix, /* Context of the fixation */ Select *pSelect /* The SELECT statement to be fixed to one database */ ){ - while( pSelect ){ - if( sqlite3FixExprList(pFix, pSelect->pEList) ){ - return 1; - } - if( sqlite3FixSrcList(pFix, pSelect->pSrc) ){ - return 1; - } - if( sqlite3FixExpr(pFix, pSelect->pWhere) ){ - return 1; - } - if( sqlite3FixExprList(pFix, pSelect->pGroupBy) ){ - return 1; - } - if( 
sqlite3FixExpr(pFix, pSelect->pHaving) ){ - return 1; - } - if( sqlite3FixExprList(pFix, pSelect->pOrderBy) ){ - return 1; - } - if( sqlite3FixExpr(pFix, pSelect->pLimit) ){ - return 1; - } - if( pSelect->pWith ){ - int i; - for(i=0; i<pSelect->pWith->nCte; i++){ - if( sqlite3FixSelect(pFix, pSelect->pWith->a[i].pSelect) ){ - return 1; - } - } - } - pSelect = pSelect->pPrior; - } - return 0; + return sqlite3WalkSelect(&pFix->w, pSelect); } SQLITE_PRIVATE int sqlite3FixExpr( DbFixer *pFix, /* Context of the fixation */ Expr *pExpr /* The expression to be fixed to one database */ ){ - while( pExpr ){ - if( !pFix->bTemp ) ExprSetProperty(pExpr, EP_FromDDL); - if( pExpr->op==TK_VARIABLE ){ - if( pFix->pParse->db->init.busy ){ - pExpr->op = TK_NULL; - }else{ - sqlite3ErrorMsg(pFix->pParse, "%s cannot use variables", pFix->zType); - return 1; - } - } - if( ExprHasProperty(pExpr, EP_TokenOnly|EP_Leaf) ) break; - if( ExprHasProperty(pExpr, EP_xIsSelect) ){ - if( sqlite3FixSelect(pFix, pExpr->x.pSelect) ) return 1; - }else{ - if( sqlite3FixExprList(pFix, pExpr->x.pList) ) return 1; - } - if( sqlite3FixExpr(pFix, pExpr->pRight) ){ - return 1; - } - pExpr = pExpr->pLeft; - } - return 0; -} -SQLITE_PRIVATE int sqlite3FixExprList( - DbFixer *pFix, /* Context of the fixation */ - ExprList *pList /* The expression to be fixed to one database */ -){ - int i; - struct ExprList_item *pItem; - if( pList==0 ) return 0; - for(i=0, pItem=pList->a; i<pList->nExpr; i++, pItem++){ - if( sqlite3FixExpr(pFix, pItem->pExpr) ){ - return 1; - } - } - return 0; + return sqlite3WalkExpr(&pFix->w, pExpr); } #endif @@ -110558,25 +111268,20 @@ SQLITE_PRIVATE int sqlite3FixTriggerStep( TriggerStep *pStep /* The trigger step be fixed to one database */ ){ while( pStep ){ - if( sqlite3FixSelect(pFix, pStep->pSelect) ){ - return 1; - } - if( sqlite3FixExpr(pFix, pStep->pWhere) ){ - return 1; - } - if( sqlite3FixExprList(pFix, pStep->pExprList) ){ - return 1; - } - if( pStep->pFrom && sqlite3FixSrcList(pFix, pStep->pFrom) ){ + if( sqlite3WalkSelect(&pFix->w, pStep->pSelect) + || sqlite3WalkExpr(&pFix->w, pStep->pWhere) + || sqlite3WalkExprList(&pFix->w, pStep->pExprList) + || sqlite3FixSrcList(pFix, pStep->pFrom) + ){ return 1; } #ifndef SQLITE_OMIT_UPSERT if( pStep->pUpsert ){ Upsert *pUp = pStep->pUpsert; - if( sqlite3FixExprList(pFix, pUp->pUpsertTarget) - || sqlite3FixExpr(pFix, pUp->pUpsertTargetWhere) - || sqlite3FixExprList(pFix, pUp->pUpsertSet) - || sqlite3FixExpr(pFix, pUp->pUpsertWhere) + if( sqlite3WalkExprList(&pFix->w, pUp->pUpsertTarget) + || sqlite3WalkExpr(&pFix->w, pUp->pUpsertTargetWhere) + || sqlite3WalkExprList(&pFix->w, pUp->pUpsertSet) + || sqlite3WalkExpr(&pFix->w, pUp->pUpsertWhere) ){ return 1; } @@ -110584,6 +111289,7 @@ SQLITE_PRIVATE int sqlite3FixTriggerStep( #endif pStep = pStep->pNext; } + return 0; } #endif @@ -110735,7 +111441,6 @@ SQLITE_PRIVATE void sqlite3AuthRead( Schema *pSchema, /* The schema of the expression */ SrcList *pTabList /* All table that pExpr might refer to */ ){ - sqlite3 *db = pParse->db; Table *pTab = 0; /* The table being read */ const char *zCol; /* Name of the column of the table */ int iSrc; /* Index in pTabList->a[] of table being read */ @@ -110743,8 +111448,8 @@ SQLITE_PRIVATE void sqlite3AuthRead( int iCol; /* Index of column in table */ assert( pExpr->op==TK_COLUMN || pExpr->op==TK_TRIGGER ); - assert( !IN_RENAME_OBJECT || db->xAuth==0 ); - if( db->xAuth==0 ) return; + assert( !IN_RENAME_OBJECT ); + assert( pParse->db->xAuth!=0 ); iDb = 
sqlite3SchemaToIndex(pParse->db, pSchema); if( iDb<0 ){ /* An attempt to read a column out of a subquery or other @@ -110756,7 +111461,7 @@ SQLITE_PRIVATE void sqlite3AuthRead( pTab = pParse->pTriggerTab; }else{ assert( pTabList ); - for(iSrc=0; ALWAYS(iSrc<pTabList->nSrc); iSrc++){ + for(iSrc=0; iSrc<pTabList->nSrc; iSrc++){ if( pExpr->iTable==pTabList->a[iSrc].iCursor ){ pTab = pTabList->a[iSrc].pTab; break; @@ -110764,7 +111469,7 @@ SQLITE_PRIVATE void sqlite3AuthRead( } } iCol = pExpr->iColumn; - if( NEVER(pTab==0) ) return; + if( pTab==0 ) return; if( iCol>=0 ){ assert( iCol<pTab->nCol ); @@ -110775,7 +111480,7 @@ SQLITE_PRIVATE void sqlite3AuthRead( }else{ zCol = "ROWID"; } - assert( iDb>=0 && iDb<db->nDb ); + assert( iDb>=0 && iDb<pParse->db->nDb ); if( SQLITE_IGNORE==sqlite3AuthReadCol(pParse, pTab->zName, zCol, iDb) ){ pExpr->op = TK_NULL; } @@ -110801,11 +111506,7 @@ SQLITE_PRIVATE int sqlite3AuthCheck( ** or if the parser is being invoked from within sqlite3_declare_vtab. */ assert( !IN_RENAME_OBJECT || db->xAuth==0 ); - if( db->init.busy || IN_SPECIAL_PARSE ){ - return SQLITE_OK; - } - - if( db->xAuth==0 ){ + if( db->xAuth==0 || db->init.busy || IN_SPECIAL_PARSE ){ return SQLITE_OK; } @@ -111011,10 +111712,36 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){ /* Begin by generating some termination code at the end of the ** vdbe program */ - v = sqlite3GetVdbe(pParse); + v = pParse->pVdbe; + if( v==0 ){ + if( db->init.busy ){ + pParse->rc = SQLITE_DONE; + return; + } + v = sqlite3GetVdbe(pParse); + if( v==0 ) pParse->rc = SQLITE_ERROR; + } assert( !pParse->isMultiWrite || sqlite3VdbeAssertMayAbort(v, pParse->mayAbort)); if( v ){ + if( pParse->bReturning ){ + Returning *pReturning = pParse->u1.pReturning; + int addrRewind; + int i; + int reg; + + addrRewind = + sqlite3VdbeAddOp1(v, OP_Rewind, pReturning->iRetCur); + VdbeCoverage(v); + reg = pReturning->iRetReg; + for(i=0; i<pReturning->nRetCol; i++){ + sqlite3VdbeAddOp3(v, OP_Column, pReturning->iRetCur, i, reg+i); + } + sqlite3VdbeAddOp2(v, OP_ResultRow, reg, i); + sqlite3VdbeAddOp2(v, OP_Next, pReturning->iRetCur, addrRewind+1); + VdbeCoverage(v); + sqlite3VdbeJumpHere(v, addrRewind); + } sqlite3VdbeAddOp0(v, OP_Halt); #if SQLITE_USER_AUTHENTICATION @@ -111092,12 +111819,16 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){ } } + if( pParse->bReturning ){ + Returning *pRet = pParse->u1.pReturning; + sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pRet->iRetCur, pRet->nRetCol); + } + /* Finally, jump back to the beginning of the executable code. 
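The epilogue coded above drains the ephemeral RETURNING table with Rewind/Column/ResultRow/Next once the DML proper has finished, which is why RETURNING rows arrive only after all writes are done. From the caller's side they come back through ordinary stepping (sketch; assumes a table t(a) already exists):

#include <stdio.h>
#include "sqlite3.h"

int returningExample(sqlite3 *db){
  sqlite3_stmt *pStmt;
  int rc = sqlite3_prepare_v2(db,
      "INSERT INTO t(a) VALUES(10),(20) RETURNING rowid, a*2;", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;
  while( sqlite3_step(pStmt)==SQLITE_ROW ){  /* one SQLITE_ROW per insert */
    printf("rowid=%lld doubled=%d\n",
        sqlite3_column_int64(pStmt, 0), sqlite3_column_int(pStmt, 1));
  }
  return sqlite3_finalize(pStmt);
}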
*/ sqlite3VdbeGoto(v, 1); } } - /* Get the VDBE program ready for execution */ if( v && pParse->nErr==0 && !db->mallocFailed ){ @@ -111316,7 +112047,7 @@ SQLITE_PRIVATE Table *sqlite3LocateTable( SQLITE_PRIVATE Table *sqlite3LocateTableItem( Parse *pParse, u32 flags, - struct SrcList_item *p + SrcItem *p ){ const char *zDb; assert( p->pSchema==0 || p->zDatabase==0 ); @@ -112074,7 +112805,8 @@ SQLITE_PRIVATE void sqlite3StartTable( }else #endif { - pParse->addrCrTab = + assert( !pParse->bReturning ); + pParse->u1.addrCrTab = sqlite3VdbeAddOp3(v, OP_CreateBtree, iDb, reg2, BTREE_INTKEY); } sqlite3OpenSchemaTable(pParse, iDb); @@ -112101,12 +112833,85 @@ begin_table_error: SQLITE_PRIVATE void sqlite3ColumnPropertiesFromName(Table *pTab, Column *pCol){ if( sqlite3_strnicmp(pCol->zName, "__hidden__", 10)==0 ){ pCol->colFlags |= COLFLAG_HIDDEN; + if( pTab ) pTab->tabFlags |= TF_HasHidden; }else if( pTab && pCol!=pTab->aCol && (pCol[-1].colFlags & COLFLAG_HIDDEN) ){ pTab->tabFlags |= TF_OOOHidden; } } #endif +/* +** Name of the special TEMP trigger used to implement RETURNING. The +** name begins with "sqlite_" so that it is guaranteed not to collide +** with any application-generated triggers. +*/ +#define RETURNING_TRIGGER_NAME "sqlite_returning" + +/* +** Clean up the data structures associated with the RETURNING clause. +*/ +static void sqlite3DeleteReturning(sqlite3 *db, Returning *pRet){ + Hash *pHash; + pHash = &(db->aDb[1].pSchema->trigHash); + sqlite3HashInsert(pHash, RETURNING_TRIGGER_NAME, 0); + sqlite3ExprListDelete(db, pRet->pReturnEL); + sqlite3DbFree(db, pRet); +} + +/* +** Add the RETURNING clause to the parse currently underway. +** +** This routine creates a special TEMP trigger that will fire for each row +** of the DML statement. That TEMP trigger contains a single SELECT +** statement with a result set that is the argument of the RETURNING clause. +** The trigger has the Trigger.bReturning flag and an opcode of +** TK_RETURNING instead of TK_SELECT, so that the trigger code generator +** knows to handle it specially. The TEMP trigger is automatically +** removed at the end of the parse. +** +** When this routine is called, we do not yet know if the RETURNING clause +** is attached to a DELETE, INSERT, or UPDATE, so construct it as a +** RETURNING trigger instead. It will then be converted into the appropriate +** type on the first call to sqlite3TriggersExist(). 
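In other words, the RETURNING clause is smuggled through the existing trigger machinery. The statement behaves roughly as if a throwaway temp trigger existed for the duration of the parse (illustration only; the trigger is synthesized in memory, never appears in the schema, and is retargeted to DELETE or UPDATE as needed):

/* INSERT INTO t(a,b) VALUES(1,2) RETURNING rowid, a+b;
**
** acts approximately like:
**
**   CREATE TEMP TRIGGER sqlite_returning AFTER INSERT ON t
**   BEGIN
**     SELECT new.rowid, new.a+new.b;  -- captured into the ephemeral table
**   END;
**
** with sqlite3DeleteReturning() removing the trigger-hash entry when
** the parse ends. */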
+*/ +SQLITE_PRIVATE void sqlite3AddReturning(Parse *pParse, ExprList *pList){ + Returning *pRet; + Hash *pHash; + sqlite3 *db = pParse->db; + if( pParse->pNewTrigger ){ + sqlite3ErrorMsg(pParse, "cannot use RETURNING in a trigger"); + }else{ + assert( pParse->bReturning==0 ); + } + pParse->bReturning = 1; + pRet = sqlite3DbMallocZero(db, sizeof(*pRet)); + if( pRet==0 ){ + sqlite3ExprListDelete(db, pList); + return; + } + pParse->u1.pReturning = pRet; + pRet->pParse = pParse; + pRet->pReturnEL = pList; + sqlite3ParserAddCleanup(pParse, + (void(*)(sqlite3*,void*))sqlite3DeleteReturning, pRet); + testcase( pParse->earlyCleanup ); + if( db->mallocFailed ) return; + pRet->retTrig.zName = RETURNING_TRIGGER_NAME; + pRet->retTrig.op = TK_RETURNING; + pRet->retTrig.tr_tm = TRIGGER_AFTER; + pRet->retTrig.bReturning = 1; + pRet->retTrig.pSchema = db->aDb[1].pSchema; + pRet->retTrig.step_list = &pRet->retTStep; + pRet->retTStep.op = TK_RETURNING; + pRet->retTStep.pTrig = &pRet->retTrig; + pRet->retTStep.pExprList = pList; + pHash = &(db->aDb[1].pSchema->trigHash); + assert( sqlite3HashFind(pHash, RETURNING_TRIGGER_NAME)==0 || pParse->nErr ); + if( sqlite3HashInsert(pHash, RETURNING_TRIGGER_NAME, &pRet->retTrig) + ==&pRet->retTrig ){ + sqlite3OomFault(db); + } +} /* ** Add a new column to the table currently being constructed. @@ -112123,6 +112928,8 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token *pName, Token *pType){ char *zType; Column *pCol; sqlite3 *db = pParse->db; + u8 hName; + if( (p = pParse->pNewTable)==0 ) return; if( p->nCol+1>db->aLimit[SQLITE_LIMIT_COLUMN] ){ sqlite3ErrorMsg(pParse, "too many columns on %s", p->zName); @@ -112134,8 +112941,9 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token *pName, Token *pType){ memcpy(z, pName->z, pName->n); z[pName->n] = 0; sqlite3Dequote(z); + hName = sqlite3StrIHash(z); for(i=0; i<p->nCol; i++){ - if( sqlite3_stricmp(z, p->aCol[i].zName)==0 ){ + if( p->aCol[i].hName==hName && sqlite3StrICmp(z, p->aCol[i].zName)==0 ){ sqlite3ErrorMsg(pParse, "duplicate column name: %s", z); sqlite3DbFree(db, z); return; @@ -112153,7 +112961,7 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token *pName, Token *pType){ pCol = &p->aCol[p->nCol]; memset(pCol, 0, sizeof(p->aCol[0])); pCol->zName = z; - pCol->hName = sqlite3StrIHash(z); + pCol->hName = hName; sqlite3ColumnPropertiesFromName(p, pCol); if( pType->n==0 ){ @@ -112936,9 +113744,10 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){ /* Convert the P3 operand of the OP_CreateBtree opcode from BTREE_INTKEY ** into BTREE_BLOBKEY. */ - if( pParse->addrCrTab ){ + assert( !pParse->bReturning ); + if( pParse->u1.addrCrTab ){ assert( v ); - sqlite3VdbeChangeP3(v, pParse->addrCrTab, BTREE_BLOBKEY); + sqlite3VdbeChangeP3(v, pParse->u1.addrCrTab, BTREE_BLOBKEY); } /* Locate the PRIMARY KEY index. Or, if this table was originally @@ -113402,7 +114211,7 @@ SQLITE_PRIVATE void sqlite3EndTable( /* Reparse everything to update our internal data structures */ sqlite3VdbeAddParseSchemaOp(v, iDb, - sqlite3MPrintf(db, "tbl_name='%q' AND type!='trigger'", p->zName)); + sqlite3MPrintf(db, "tbl_name='%q' AND type!='trigger'", p->zName),0); } /* Add the table to the in-memory representation of the database. 
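Hashing the candidate name once before the duplicate-column loop in sqlite3AddColumn() above turns most comparisons into a one-byte check, with the full case-insensitive compare only on hash hits. The shape of the optimization in isolation (toy hash function; strcasecmp() is POSIX, used here for brevity):

#include <ctype.h>
#include <string.h>
#include <strings.h>

/* Toy stand-in for sqlite3StrIHash(): case-insensitive, one byte. */
static unsigned char strIHash(const char *z){
  unsigned char h = 0;
  while( *z ){ h += (unsigned char)tolower((unsigned char)*z++); }
  return h;
}

/* Return the index of zName in azCol[0..nCol-1], or -1 if absent. */
static int findColumn(char **azCol, const unsigned char *aHash, int nCol,
                      const char *zName){
  int i;
  unsigned char h = strIHash(zName);   /* hash once, outside the loop */
  for(i=0; i<nCol; i++){
    if( aHash[i]==h && strcasecmp(azCol[i], zName)==0 ) return i;
  }
  return -1;
}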
@@ -113419,20 +114228,17 @@ SQLITE_PRIVATE void sqlite3EndTable( } pParse->pNewTable = 0; db->mDbFlags |= DBFLAG_SchemaChange; + } #ifndef SQLITE_OMIT_ALTERTABLE - if( !p->pSelect ){ - const char *zName = (const char *)pParse->sNameToken.z; - int nName; - assert( !pSelect && pCons && pEnd ); - if( pCons->z==0 ){ - pCons = pEnd; - } - nName = (int)((const char *)pCons->z - zName); - p->addColOffset = 13 + sqlite3Utf8CharLen(zName, nName); + if( !pSelect && !p->pSelect ){ + assert( pCons && pEnd ); + if( pCons->z==0 ){ + pCons = pEnd; } -#endif + p->addColOffset = 13 + (int)(pCons->z - pParse->sNameToken.z); } +#endif } #ifndef SQLITE_OMIT_VIEW @@ -113623,6 +114429,7 @@ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){ assert( pTable->aCol==0 ); pTable->nCol = pSelTab->nCol; pTable->aCol = pSelTab->aCol; + pTable->tabFlags |= (pSelTab->tabFlags & COLFLAG_NOINSERT); pSelTab->nCol = 0; pSelTab->aCol = 0; assert( sqlite3SchemaMutexHeld(db, 0, pTable->pSchema) ); @@ -114890,7 +115697,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex( sqlite3RefillIndex(pParse, pIndex, iMem); sqlite3ChangeCookie(pParse, iDb); sqlite3VdbeAddParseSchemaOp(v, iDb, - sqlite3MPrintf(db, "name='%q' AND type='index'", pIndex->zName)); + sqlite3MPrintf(db, "name='%q' AND type='index'", pIndex->zName), 0); sqlite3VdbeAddOp2(v, OP_Expire, 0, 1); } @@ -114911,7 +115718,11 @@ SQLITE_PRIVATE void sqlite3CreateIndex( /* Clean up before exiting */ exit_create_index: if( pIndex ) sqlite3FreeIndex(db, pIndex); - if( pTab ){ /* Ensure all REPLACE indexes are at the end of the list */ + if( pTab ){ + /* Ensure all REPLACE indexes on pTab are at the end of the pIndex list. + ** The list was already ordered when this routine was entered, so at this + ** point at most a single index (the newly added index) will be out of + ** order. So we have to reorder at most one index. */ Index **ppFrom = &pTab->pIndex; Index *pThis; for(ppFrom=&pTab->pIndex; (pThis = *ppFrom)!=0; ppFrom=&pThis->pNext){ @@ -114925,6 +115736,16 @@ exit_create_index: } break; } +#ifdef SQLITE_DEBUG + /* Verify that all REPLACE indexes really are now at the end + ** of the index list. In other words, no other index type ever + ** comes after a REPLACE index on the list. 
*/ + for(pThis = pTab->pIndex; pThis; pThis=pThis->pNext){ + assert( pThis->onError!=OE_Replace + || pThis->pNext==0 + || pThis->pNext->onError==OE_Replace ); + } +#endif } sqlite3ExprDelete(db, pPIWhere); sqlite3ExprListDelete(db, pList); @@ -115283,7 +116104,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppend( Token *pTable, /* Table to append */ Token *pDatabase /* Database of the table */ ){ - struct SrcList_item *pItem; + SrcItem *pItem; sqlite3 *db; assert( pDatabase==0 || pTable!=0 ); /* Cannot have C without B */ assert( pParse!=0 ); @@ -115324,7 +116145,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppend( */ SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse *pParse, SrcList *pList){ int i; - struct SrcList_item *pItem; + SrcItem *pItem; assert(pList || pParse->db->mallocFailed ); if( pList ){ for(i=0, pItem=pList->a; i<pList->nSrc; i++, pItem++){ @@ -115342,7 +116163,7 @@ SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse *pParse, SrcList *pList){ */ SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3 *db, SrcList *pList){ int i; - struct SrcList_item *pItem; + SrcItem *pItem; if( pList==0 ) return; for(pItem=pList->a, i=0; i<pList->nSrc; i++, pItem++){ if( pItem->zDatabase ) sqlite3DbFreeNN(db, pItem->zDatabase); @@ -115384,7 +116205,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm( Expr *pOn, /* The ON clause of a join */ IdList *pUsing /* The USING clause of a join */ ){ - struct SrcList_item *pItem; + SrcItem *pItem; sqlite3 *db = pParse->db; if( !p && (pOn || pUsing) ){ sqlite3ErrorMsg(pParse, "a JOIN clause is required before %s", @@ -115428,7 +116249,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm( SQLITE_PRIVATE void sqlite3SrcListIndexedBy(Parse *pParse, SrcList *p, Token *pIndexedBy){ assert( pIndexedBy!=0 ); if( p && pIndexedBy->n>0 ){ - struct SrcList_item *pItem; + SrcItem *pItem; assert( p->nSrc>0 ); pItem = &p->a[p->nSrc-1]; assert( pItem->fg.notIndexed==0 ); @@ -115458,7 +116279,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppendList(Parse *pParse, SrcList *p1, Src sqlite3SrcListDelete(pParse->db, p2); }else{ p1 = pNew; - memcpy(&p1->a[1], p2->a, p2->nSrc*sizeof(struct SrcList_item)); + memcpy(&p1->a[1], p2->a, p2->nSrc*sizeof(SrcItem)); sqlite3DbFree(pParse->db, p2); } } @@ -115471,7 +116292,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppendList(Parse *pParse, SrcList *p1, Src */ SQLITE_PRIVATE void sqlite3SrcListFuncArgs(Parse *pParse, SrcList *p, ExprList *pList){ if( p ){ - struct SrcList_item *pItem = &p->a[p->nSrc-1]; + SrcItem *pItem = &p->a[p->nSrc-1]; assert( pItem->fg.notIndexed==0 ); assert( pItem->fg.isIndexedBy==0 ); assert( pItem->fg.isTabFunc==0 ); @@ -115626,7 +116447,7 @@ SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *pParse){ static void sqlite3CodeVerifySchemaAtToplevel(Parse *pToplevel, int iDb){ assert( iDb>=0 && iDb<pToplevel->db->nDb ); assert( pToplevel->db->aDb[iDb].pBt!=0 || iDb==1 ); - assert( iDb<SQLITE_MAX_ATTACHED+2 ); + assert( iDb<SQLITE_MAX_DB ); assert( sqlite3SchemaMutexHeld(pToplevel->db, iDb, 0) ); if( DbMaskTest(pToplevel->cookieMask, iDb)==0 ){ DbMaskSet(pToplevel->cookieMask, iDb); @@ -115968,24 +116789,76 @@ SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoOfIndex(Parse *pParse, Index *pIdx){ } #ifndef SQLITE_OMIT_CTE +/* +** Create a new CTE object +*/ +SQLITE_PRIVATE Cte *sqlite3CteNew( + Parse *pParse, /* Parsing context */ + Token *pName, /* Name of the common-table */ + ExprList *pArglist, /* Optional column name list for the table */ + Select *pQuery, /* Query used to initialize the table */ + u8 eM10d /* The 
MATERIALIZED flag */ +){ + Cte *pNew; + sqlite3 *db = pParse->db; + + pNew = sqlite3DbMallocZero(db, sizeof(*pNew)); + assert( pNew!=0 || db->mallocFailed ); + + if( db->mallocFailed ){ + sqlite3ExprListDelete(db, pArglist); + sqlite3SelectDelete(db, pQuery); + }else{ + pNew->pSelect = pQuery; + pNew->pCols = pArglist; + pNew->zName = sqlite3NameFromToken(pParse->db, pName); + pNew->eM10d = eM10d; + } + return pNew; +} + +/* +** Clear information from a Cte object, but do not deallocate storage +** for the object itself. +*/ +static void cteClear(sqlite3 *db, Cte *pCte){ + assert( pCte!=0 ); + sqlite3ExprListDelete(db, pCte->pCols); + sqlite3SelectDelete(db, pCte->pSelect); + sqlite3DbFree(db, pCte->zName); +} + +/* +** Free the contents of the CTE object passed as the second argument. +*/ +SQLITE_PRIVATE void sqlite3CteDelete(sqlite3 *db, Cte *pCte){ + assert( pCte!=0 ); + cteClear(db, pCte); + sqlite3DbFree(db, pCte); +} + /* ** This routine is invoked once per CTE by the parser while parsing a -** WITH clause. +** WITH clause. The CTE described by teh third argument is added to +** the WITH clause of the second argument. If the second argument is +** NULL, then a new WITH argument is created. */ SQLITE_PRIVATE With *sqlite3WithAdd( Parse *pParse, /* Parsing context */ With *pWith, /* Existing WITH clause, or NULL */ - Token *pName, /* Name of the common-table */ - ExprList *pArglist, /* Optional column name list for the table */ - Select *pQuery /* Query used to initialize the table */ + Cte *pCte /* CTE to add to the WITH clause */ ){ sqlite3 *db = pParse->db; With *pNew; char *zName; + if( pCte==0 ){ + return pWith; + } + /* Check that the CTE name is unique within this WITH clause. If ** not, store an error in the Parse structure. */ - zName = sqlite3NameFromToken(pParse->db, pName); + zName = pCte->zName; if( zName && pWith ){ int i; for(i=0; i<pWith->nCte; i++){ @@ -116004,16 +116877,11 @@ SQLITE_PRIVATE With *sqlite3WithAdd( assert( (pNew!=0 && zName!=0) || db->mallocFailed ); if( db->mallocFailed ){ - sqlite3ExprListDelete(db, pArglist); - sqlite3SelectDelete(db, pQuery); - sqlite3DbFree(db, zName); + sqlite3CteDelete(db, pCte); pNew = pWith; }else{ - pNew->a[pNew->nCte].pSelect = pQuery; - pNew->a[pNew->nCte].pCols = pArglist; - pNew->a[pNew->nCte].zName = zName; - pNew->a[pNew->nCte].zCteErr = 0; - pNew->nCte++; + pNew->a[pNew->nCte++] = *pCte; + sqlite3DbFree(db, pCte); } return pNew; @@ -116026,10 +116894,7 @@ SQLITE_PRIVATE void sqlite3WithDelete(sqlite3 *db, With *pWith){ if( pWith ){ int i; for(i=0; i<pWith->nCte; i++){ - struct Cte *pCte = &pWith->a[i]; - sqlite3ExprListDelete(db, pCte->pCols); - sqlite3SelectDelete(db, pCte->pSelect); - sqlite3DbFree(db, pCte->zName); + cteClear(db, &pWith->a[i]); } sqlite3DbFree(db, pWith); } @@ -116608,7 +117473,7 @@ SQLITE_PRIVATE Schema *sqlite3SchemaGet(sqlite3 *db, Btree *pBt){ ** */ SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse *pParse, SrcList *pSrc){ - struct SrcList_item *pItem = pSrc->a; + SrcItem *pItem = pSrc->a; Table *pTab; assert( pItem && pSrc->nSrc>=1 ); pTab = sqlite3LocateTableItem(pParse, 0, pItem); @@ -116616,9 +117481,9 @@ SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse *pParse, SrcList *pSrc){ pItem->pTab = pTab; if( pTab ){ pTab->nTabRef++; - } - if( sqlite3IndexedByLookup(pParse, pItem) ){ - pTab = 0; + if( pItem->fg.isIndexedBy && sqlite3IndexedByLookup(pParse, pItem) ){ + pTab = 0; + } } return pTab; } @@ -116786,9 +117651,15 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere( /* duplicate the FROM clause as it is 
needed by both the DELETE/UPDATE tree ** and the SELECT subtree. */ pSrc->a[0].pTab = 0; - pSelectSrc = sqlite3SrcListDup(pParse->db, pSrc, 0); + pSelectSrc = sqlite3SrcListDup(db, pSrc, 0); pSrc->a[0].pTab = pTab; - pSrc->a[0].pIBIndex = 0; + if( pSrc->a[0].fg.isIndexedBy ){ + pSrc->a[0].u2.pIBIndex = 0; + pSrc->a[0].fg.isIndexedBy = 0; + sqlite3DbFree(db, pSrc->a[0].u1.zIndexedBy); + }else if( pSrc->a[0].fg.isCte ){ + pSrc->a[0].u2.pCteUse->nUse++; + } /* generate the SELECT expression tree. */ pSelect = sqlite3SelectNew(pParse, pEList, pSelectSrc, pWhere, 0 ,0, @@ -116966,6 +117837,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( if( (db->flags & SQLITE_CountRows)!=0 && !pParse->nested && !pParse->pTriggerTab + && !pParse->bReturning ){ memCnt = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Integer, 0, memCnt); @@ -117187,7 +118059,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( ** invoke the callback function. */ if( memCnt ){ - sqlite3VdbeAddOp2(v, OP_ResultRow, memCnt, 1); + sqlite3VdbeAddOp2(v, OP_ChngCntRow, memCnt, 1); sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows deleted", SQLITE_STATIC); } @@ -118235,7 +119107,8 @@ static int patternCompare( /* Skip over multiple "*" characters in the pattern. If there ** are also "?" characters, skip those as well, but consume a ** single character of the input string for each "?" skipped */ - while( (c=Utf8Read(zPattern)) == matchAll || c == matchOne ){ + while( (c=Utf8Read(zPattern)) == matchAll + || (c == matchOne && matchOne!=0) ){ if( c==matchOne && sqlite3Utf8Read(&zString)==0 ){ return SQLITE_NOWILDCARDMATCH; } @@ -119406,7 +120279,9 @@ SQLITE_PRIVATE void sqlite3RegisterLikeFunctions(sqlite3 *db, int caseSensitive) SQLITE_PRIVATE int sqlite3IsLikeFunction(sqlite3 *db, Expr *pExpr, int *pIsNocase, char *aWc){ FuncDef *pDef; int nExpr; - if( pExpr->op!=TK_FUNCTION || !pExpr->x.pList ){ + assert( pExpr!=0 ); + assert( pExpr->op==TK_FUNCTION ); + if( !pExpr->x.pList ){ return 0; } assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); @@ -119445,6 +120320,203 @@ SQLITE_PRIVATE int sqlite3IsLikeFunction(sqlite3 *db, Expr *pExpr, int *pIsNocas return 1; } +/* Mathematical Constants */ +#ifndef M_PI +# define M_PI 3.141592653589793238462643383279502884 +#endif +#ifndef M_LN10 +# define M_LN10 2.302585092994045684017991454684364208 +#endif +#ifndef M_LN2 +# define M_LN2 0.693147180559945309417232121458176568 +#endif + + +/* Extra math functions that require linking with -lm +*/ +#ifdef SQLITE_ENABLE_MATH_FUNCTIONS +/* +** Implementation SQL functions: +** +** ceil(X) +** ceiling(X) +** floor(X) +** +** The sqlite3_user_data() pointer is a pointer to the libm implementation +** of the underlying C function. +*/ +static void ceilingFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + assert( argc==1 ); + switch( sqlite3_value_numeric_type(argv[0]) ){ + case SQLITE_INTEGER: { + sqlite3_result_int64(context, sqlite3_value_int64(argv[0])); + break; + } + case SQLITE_FLOAT: { + double (*x)(double) = (double(*)(double))sqlite3_user_data(context); + sqlite3_result_double(context, x(sqlite3_value_double(argv[0]))); + break; + } + default: { + break; + } + } +} + +/* +** On some systems, ceil() and floor() are intrinsic function. You are +** unable to take a pointer to these functions. Hence, we here wrap them +** in our own actual functions. 
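The logFunc() implementation that follows funnels every variant through the natural logarithm: the fixed bases multiply by the precomputed reciprocals 1/M_LN10 and 1/M_LN2 rather than dividing on each call, and the two-argument form applies the change-of-base identity log_b(x) = ln(x)/ln(b). The same arithmetic in miniature:

#include <math.h>
#ifndef M_LN10
# define M_LN10 2.302585092994045684017991454684364208
#endif
#ifndef M_LN2
# define M_LN2 0.693147180559945309417232121458176568
#endif

/* Fixed bases: multiply by a precomputed reciprocal, as logFunc() does. */
static double log10_via_ln(double x){ return log(x)*(1.0/M_LN10); }
static double log2_via_ln(double x){ return log(x)*(1.0/M_LN2); }

/* The general change-of-base identity:  log_b(x) = ln(x) / ln(b)  */
static double log_base(double b, double x){ return log(x)/log(b); }

So, with SQLITE_ENABLE_MATH_FUNCTIONS compiled in, SELECT log(100), log2(8), log(2,8) yields 2.0, 3.0 and 3.0.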
+*/
+static double xCeil(double x){ return ceil(x); }
+static double xFloor(double x){ return floor(x); }
+
+/*
+** Implementation of SQL functions:
+**
+**   ln(X)       - natural logarithm
+**   log(X)      - log X base 10
+**   log10(X)    - log X base 10
+**   log(B,X)    - log X base B
+*/
+static void logFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  double x, b, ans;
+  assert( argc==1 || argc==2 );
+  switch( sqlite3_value_numeric_type(argv[0]) ){
+    case SQLITE_INTEGER:
+    case SQLITE_FLOAT:
+      x = sqlite3_value_double(argv[0]);
+      if( x<=0.0 ) return;
+      break;
+    default:
+      return;
+  }
+  if( argc==2 ){
+    switch( sqlite3_value_numeric_type(argv[0]) ){
+      case SQLITE_INTEGER:
+      case SQLITE_FLOAT:
+        b = log(x);
+        if( b<=0.0 ) return;
+        x = sqlite3_value_double(argv[1]);
+        if( x<=0.0 ) return;
+        break;
+      default:
+        return;
+    }
+    ans = log(x)/b;
+  }else{
+    ans = log(x);
+    switch( SQLITE_PTR_TO_INT(sqlite3_user_data(context)) ){
+      case 1:
+        /* Convert from natural logarithm to log base 10 */
+        ans *= 1.0/M_LN10;
+        break;
+      case 2:
+        /* Convert from natural logarithm to log base 2 */
+        ans *= 1.0/M_LN2;
+        break;
+      default:
+        break;
+    }
+  }
+  sqlite3_result_double(context, ans);
+}
+
+/*
+** Functions to convert degrees to radians and radians to degrees.
+*/
+static double degToRad(double x){ return x*(M_PI/180.0); }
+static double radToDeg(double x){ return x*(180.0/M_PI); }
+
+/*
+** Implementation of 1-argument SQL math functions:
+**
+**   exp(X)  - Compute e to the X-th power
+*/
+static void math1Func(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  int type0;
+  double v0, ans;
+  double (*x)(double);
+  assert( argc==1 );
+  type0 = sqlite3_value_numeric_type(argv[0]);
+  if( type0!=SQLITE_INTEGER && type0!=SQLITE_FLOAT ) return;
+  v0 = sqlite3_value_double(argv[0]);
+  x = (double(*)(double))sqlite3_user_data(context);
+  ans = x(v0);
+  sqlite3_result_double(context, ans);
+}
+
+/*
+** Implementation of 2-argument SQL math functions:
+**
+**   power(X,Y)  - Compute X to the Y-th power
+*/
+static void math2Func(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  int type0, type1;
+  double v0, v1, ans;
+  double (*x)(double,double);
+  assert( argc==2 );
+  type0 = sqlite3_value_numeric_type(argv[0]);
+  if( type0!=SQLITE_INTEGER && type0!=SQLITE_FLOAT ) return;
+  type1 = sqlite3_value_numeric_type(argv[1]);
+  if( type1!=SQLITE_INTEGER && type1!=SQLITE_FLOAT ) return;
+  v0 = sqlite3_value_double(argv[0]);
+  v1 = sqlite3_value_double(argv[1]);
+  x = (double(*)(double,double))sqlite3_user_data(context);
+  ans = x(v0, v1);
+  sqlite3_result_double(context, ans);
+}
+
+/*
+** Implementation of 0-argument SQL math function:
+**
+**   pi()  - Return the approximate value of pi
+*/
+static void piFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  assert( argc==0 );
+  sqlite3_result_double(context, M_PI);
+}
+
+#endif /* SQLITE_ENABLE_MATH_FUNCTIONS */
+
+/*
+** Implementation of sign(X) function.
+*/
+static void signFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  int type0;
+  double x;
+  UNUSED_PARAMETER(argc);
+  assert( argc==1 );
+  type0 = sqlite3_value_numeric_type(argv[0]);
+  if( type0!=SQLITE_INTEGER && type0!=SQLITE_FLOAT ) return;
+  x = sqlite3_value_double(argv[0]);
+  sqlite3_result_int(context, x<0.0 ? -1 : x>0.0 ? +1 : 0);
+}
+
 /*
 ** All of the FuncDef structures in the aBuiltinFunc[] array above
 ** to the global function hash table.
This occurs at start-time (as @@ -119563,6 +120635,43 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){ #endif FUNCTION(coalesce, 1, 0, 0, 0 ), FUNCTION(coalesce, 0, 0, 0, 0 ), +#ifdef SQLITE_ENABLE_MATH_FUNCTIONS + MFUNCTION(ceil, 1, xCeil, ceilingFunc ), + MFUNCTION(ceiling, 1, xCeil, ceilingFunc ), + MFUNCTION(floor, 1, xFloor, ceilingFunc ), +#if SQLITE_HAVE_C99_MATH_FUNCS + MFUNCTION(trunc, 1, trunc, ceilingFunc ), +#endif + FUNCTION(ln, 1, 0, 0, logFunc ), + FUNCTION(log, 1, 1, 0, logFunc ), + FUNCTION(log10, 1, 1, 0, logFunc ), + FUNCTION(log2, 1, 2, 0, logFunc ), + FUNCTION(log, 2, 0, 0, logFunc ), + MFUNCTION(exp, 1, exp, math1Func ), + MFUNCTION(pow, 2, pow, math2Func ), + MFUNCTION(power, 2, pow, math2Func ), + MFUNCTION(mod, 2, fmod, math2Func ), + MFUNCTION(acos, 1, acos, math1Func ), + MFUNCTION(asin, 1, asin, math1Func ), + MFUNCTION(atan, 1, atan, math1Func ), + MFUNCTION(atan2, 2, atan2, math2Func ), + MFUNCTION(cos, 1, cos, math1Func ), + MFUNCTION(sin, 1, sin, math1Func ), + MFUNCTION(tan, 1, tan, math1Func ), + MFUNCTION(cosh, 1, cosh, math1Func ), + MFUNCTION(sinh, 1, sinh, math1Func ), + MFUNCTION(tanh, 1, tanh, math1Func ), +#if SQLITE_HAVE_C99_MATH_FUNCS + MFUNCTION(acosh, 1, acosh, math1Func ), + MFUNCTION(asinh, 1, asinh, math1Func ), + MFUNCTION(atanh, 1, atanh, math1Func ), +#endif + MFUNCTION(sqrt, 1, sqrt, math1Func ), + MFUNCTION(radians, 1, degToRad, math1Func ), + MFUNCTION(degrees, 1, radToDeg, math1Func ), + FUNCTION(pi, 0, 0, 0, piFunc ), +#endif /* SQLITE_ENABLE_MATH_FUNCTIONS */ + FUNCTION(sign, 1, 0, 0, signFunc ), INLINE_FUNC(coalesce, -1, INLINEFUNC_coalesce, 0 ), INLINE_FUNC(iif, 3, INLINEFUNC_iif, 0 ), }; @@ -120618,7 +121727,7 @@ SQLITE_PRIVATE void sqlite3FkCheck( ** child table as a SrcList for sqlite3WhereBegin() */ pSrc = sqlite3SrcListAppend(pParse, 0, 0, 0); if( pSrc ){ - struct SrcList_item *pItem = pSrc->a; + SrcItem *pItem = pSrc->a; pItem->pTab = pFKey->pFrom; pItem->zName = pFKey->pFrom->zName; pItem->pTab->nTabRef++; @@ -120706,7 +121815,9 @@ SQLITE_PRIVATE u32 sqlite3FkOldmask( ** ** For an UPDATE, this function returns 2 if: ** -** * There are any FKs for which pTab is the child and the parent table, or +** * There are any FKs for which pTab is the child and the parent table +** and any FK processing at all is required (even of a different FK), or +** ** * the UPDATE modifies one or more parent keys for which the action is ** not "NO ACTION" (i.e. is CASCADE, SET DEFAULT or SET NULL). ** @@ -120718,13 +121829,14 @@ SQLITE_PRIVATE int sqlite3FkRequired( int *aChange, /* Non-NULL for UPDATE operations */ int chngRowid /* True for UPDATE that affects rowid */ ){ - int eRet = 0; + int eRet = 1; /* Value to return if bHaveFK is true */ + int bHaveFK = 0; /* If FK processing is required */ if( pParse->db->flags&SQLITE_ForeignKeys ){ if( !aChange ){ /* A DELETE operation. Foreign key processing is required if the ** table in question is either the child or parent table for any ** foreign key constraint. */ - eRet = (sqlite3FkReferences(pTab) || pTab->pFKey); + bHaveFK = (sqlite3FkReferences(pTab) || pTab->pFKey); }else{ /* This is an UPDATE. Foreign key processing is only required if the ** operation modifies one or more child or parent key columns. */ @@ -120732,9 +121844,9 @@ SQLITE_PRIVATE int sqlite3FkRequired( /* Check if any child key columns are being modified. 
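Editor's note: the `sqlite3FkRequired()` rewrite above separates "some FK processing is needed" (`bHaveFK`) from "this statement must take the expensive two-pass path" (return value 2), and the return-2 case for a self-referential constraint now also requires that a child-key column actually change. The statement shape that exercises it is a table that is both child and parent of the same foreign key; a sketch (schema invented, and the driver's `_foreign_keys` DSN parameter is used so enforcement is on for every pooled connection):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:?_foreign_keys=on")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// emp is both the child and the parent of the same constraint.
	if _, err := db.Exec(`CREATE TABLE emp(
	    id   INTEGER PRIMARY KEY,
	    name TEXT,
	    boss INTEGER REFERENCES emp(id)
	  );
	  INSERT INTO emp VALUES (1,'ann',NULL),(2,'bob',1),(3,'cat',2);`); err != nil {
		log.Fatal(err)
	}

	// Modifying the child-key column of a self-referential FK is what
	// drives sqlite3FkRequired() to its "return 2" answer.
	if _, err := db.Exec(`UPDATE emp SET boss = 1 WHERE id = 3`); err != nil {
		log.Fatal(err)
	}

	// Modifying an unrelated column requires no FK processing at all.
	if _, err := db.Exec(`UPDATE emp SET name = 'cal' WHERE id = 3`); err != nil {
		log.Fatal(err)
	}
}
```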
*/ for(p=pTab->pFKey; p; p=p->pNextFrom){ - if( 0==sqlite3_stricmp(pTab->zName, p->zTo) ) return 2; if( fkChildIsModified(pTab, p, aChange, chngRowid) ){ - eRet = 1; + if( 0==sqlite3_stricmp(pTab->zName, p->zTo) ) eRet = 2; + bHaveFK = 1; } } @@ -120742,12 +121854,12 @@ SQLITE_PRIVATE int sqlite3FkRequired( for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){ if( fkParentIsModified(pTab, p, aChange, chngRowid) ){ if( p->aAction[1]!=OE_None ) return 2; - eRet = 1; + bHaveFK = 1; } } } } - return eRet; + return bHaveFK ? eRet : 0; } /* @@ -121416,7 +122528,9 @@ static int autoIncBegin( while( pInfo && pInfo->pTab!=pTab ){ pInfo = pInfo->pNext; } if( pInfo==0 ){ pInfo = sqlite3DbMallocRawNN(pParse->db, sizeof(*pInfo)); - if( pInfo==0 ) return 0; + sqlite3ParserAddCleanup(pToplevel, sqlite3DbFree, pInfo); + testcase( pParse->earlyCleanup ); + if( pParse->db->mallocFailed ) return 0; pInfo->pNext = pToplevel->pAinc; pToplevel->pAinc = pInfo; pInfo->pTab = pTab; @@ -121974,19 +123088,24 @@ SQLITE_PRIVATE void sqlite3Insert( } } #endif - } - /* Make sure the number of columns in the source data matches the number - ** of columns to be inserted into the table. - */ - for(i=0; i<pTab->nCol; i++){ - if( pTab->aCol[i].colFlags & COLFLAG_NOINSERT ) nHidden++; - } - if( pColumn==0 && nColumn && nColumn!=(pTab->nCol-nHidden) ){ - sqlite3ErrorMsg(pParse, - "table %S has %d columns but %d values were supplied", - pTabList, 0, pTab->nCol-nHidden, nColumn); - goto insert_cleanup; + /* Make sure the number of columns in the source data matches the number + ** of columns to be inserted into the table. + */ + assert( TF_HasHidden==COLFLAG_HIDDEN ); + assert( TF_HasGenerated==COLFLAG_GENERATED ); + assert( COLFLAG_NOINSERT==(COLFLAG_GENERATED|COLFLAG_HIDDEN) ); + if( (pTab->tabFlags & (TF_HasGenerated|TF_HasHidden))!=0 ){ + for(i=0; i<pTab->nCol; i++){ + if( pTab->aCol[i].colFlags & COLFLAG_NOINSERT ) nHidden++; + } + } + if( nColumn!=(pTab->nCol-nHidden) ){ + sqlite3ErrorMsg(pParse, + "table %S has %d columns but %d values were supplied", + pTabList, 0, pTab->nCol-nHidden, nColumn); + goto insert_cleanup; + } } if( pColumn!=0 && nColumn!=pColumn->nId ){ sqlite3ErrorMsg(pParse, "%d values for %d columns", nColumn, pColumn->nId); @@ -121998,6 +123117,7 @@ SQLITE_PRIVATE void sqlite3Insert( if( (db->flags & SQLITE_CountRows)!=0 && !pParse->nested && !pParse->pTriggerTab + && !pParse->bReturning ){ regRowCount = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Integer, 0, regRowCount); @@ -122021,6 +123141,7 @@ SQLITE_PRIVATE void sqlite3Insert( } #ifndef SQLITE_OMIT_UPSERT if( pUpsert ){ + Upsert *pNx; if( IsVirtual(pTab) ){ sqlite3ErrorMsg(pParse, "UPSERT not implemented for virtual table \"%s\"", pTab->zName); @@ -122034,13 +123155,19 @@ SQLITE_PRIVATE void sqlite3Insert( goto insert_cleanup; } pTabList->a[0].iCursor = iDataCur; - pUpsert->pUpsertSrc = pTabList; - pUpsert->regData = regData; - pUpsert->iDataCur = iDataCur; - pUpsert->iIdxCur = iIdxCur; - if( pUpsert->pUpsertTarget ){ - sqlite3UpsertAnalyzeTarget(pParse, pTabList, pUpsert); - } + pNx = pUpsert; + do{ + pNx->pUpsertSrc = pTabList; + pNx->regData = regData; + pNx->iDataCur = iDataCur; + pNx->iIdxCur = iIdxCur; + if( pNx->pUpsertTarget ){ + if( sqlite3UpsertAnalyzeTarget(pParse, pTabList, pNx) ){ + goto insert_cleanup; + } + } + pNx = pNx->pNextUpsert; + }while( pNx!=0 ); } #endif @@ -122181,11 +123308,6 @@ SQLITE_PRIVATE void sqlite3Insert( sqlite3VdbeAddOp1(v, OP_MustBeInt, regCols); VdbeCoverage(v); } - /* Cannot have triggers on a virtual table. 
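Editor's note: the new `do{...}while(pNx!=0)` loop over `pUpsert->pNextUpsert` above is the code-generation plumbing for SQLite 3.35's chained UPSERT grammar: a single `INSERT` may carry several `ON CONFLICT` clauses, each naming a different unique constraint, with an optional target-less clause allowed last. A sketch against an invented two-constraint table:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE inv(
	    sku    TEXT UNIQUE,
	    serial TEXT UNIQUE,
	    qty    INT
	  );
	  INSERT INTO inv VALUES ('widget', 'SN-1', 1);`); err != nil {
		log.Fatal(err)
	}

	// Each clause resolves a conflict on a different unique index;
	// a target-less DO NOTHING may only appear as the final clause.
	_, err = db.Exec(`INSERT INTO inv(sku, serial, qty) VALUES ('widget', 'SN-2', 1)
	  ON CONFLICT(sku)    DO UPDATE SET qty = qty + excluded.qty
	  ON CONFLICT(serial) DO NOTHING`)
	if err != nil {
		log.Fatal(err)
	}

	var qty int
	if err := db.QueryRow(`SELECT qty FROM inv WHERE sku='widget'`).Scan(&qty); err != nil {
		log.Fatal(err)
	}
	fmt.Println("qty =", qty) // 2: the sku clause fired
}
```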
If it were possible, - ** this block would have to account for hidden column. - */ - assert( !IsVirtual(pTab) ); - /* Copy the new data already generated. */ assert( pTab->nNVCol>0 ); sqlite3VdbeAddOp3(v, OP_Copy, regRowid+1, regCols+1, pTab->nNVCol-1); @@ -122340,7 +123462,9 @@ SQLITE_PRIVATE void sqlite3Insert( sqlite3VdbeJumpHere(v, addrInsTop); } +#ifndef SQLITE_OMIT_XFER_OPT insert_end: +#endif /* SQLITE_OMIT_XFER_OPT */ /* Update the sqlite_sequence table by storing the content of the ** maximum rowid counter values recorded while inserting into ** autoincrement tables. @@ -122355,7 +123479,7 @@ insert_end: ** invoke the callback function. */ if( regRowCount ){ - sqlite3VdbeAddOp2(v, OP_ResultRow, regRowCount, 1); + sqlite3VdbeAddOp2(v, OP_ChngCntRow, regRowCount, 1); sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows inserted", SQLITE_STATIC); } @@ -122445,6 +123569,70 @@ SQLITE_PRIVATE int sqlite3ExprReferencesUpdatedColumn( return w.eCode!=0; } +/* +** The sqlite3GenerateConstraintChecks() routine usually wants to visit +** the indexes of a table in the order provided in the Table->pIndex list. +** However, sometimes (rarely - when there is an upsert) it wants to visit +** the indexes in a different order. The following data structures accomplish +** this. +** +** The IndexIterator object is used to walk through all of the indexes +** of a table in either Index.pNext order, or in some other order established +** by an array of IndexListTerm objects. +*/ +typedef struct IndexListTerm IndexListTerm; +typedef struct IndexIterator IndexIterator; +struct IndexIterator { + int eType; /* 0 for Index.pNext list. 1 for an array of IndexListTerm */ + int i; /* Index of the current item from the list */ + union { + struct { /* Use this object for eType==0: A Index.pNext list */ + Index *pIdx; /* The current Index */ + } lx; + struct { /* Use this object for eType==1; Array of IndexListTerm */ + int nIdx; /* Size of the array */ + IndexListTerm *aIdx; /* Array of IndexListTerms */ + } ax; + } u; +}; + +/* When IndexIterator.eType==1, then each index is an array of instances +** of the following object +*/ +struct IndexListTerm { + Index *p; /* The index */ + int ix; /* Which entry in the original Table.pIndex list is this index*/ +}; + +/* Return the first index on the list */ +static Index *indexIteratorFirst(IndexIterator *pIter, int *pIx){ + assert( pIter->i==0 ); + if( pIter->eType ){ + *pIx = pIter->u.ax.aIdx[0].ix; + return pIter->u.ax.aIdx[0].p; + }else{ + *pIx = 0; + return pIter->u.lx.pIdx; + } +} + +/* Return the next index from the list. Return NULL when out of indexes */ +static Index *indexIteratorNext(IndexIterator *pIter, int *pIx){ + if( pIter->eType ){ + int i = ++pIter->i; + if( i>=pIter->u.ax.nIdx ){ + *pIx = i; + return 0; + } + *pIx = pIter->u.ax.aIdx[i].ix; + return pIter->u.ax.aIdx[i].p; + }else{ + ++(*pIx); + pIter->u.lx.pIdx = pIter->u.lx.pIdx->pNext; + return pIter->u.lx.pIdx; + } +} + /* ** Generate code to do constraint checks prior to an INSERT or an UPDATE ** on table pTab. 
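Editor's note: `IndexIterator` above abstracts two traversal orders over the same set of indexes — the plain `Index.pNext` chain, or an `IndexListTerm` array when chained ON CONFLICT clauses dictate which uniqueness checks must run first. A rough Go re-expression of the pattern, illustrative only (the names are mine, not SQLite's):

```go
package main

import "fmt"

// index stands in for SQLite's Index struct; only the linkage matters here.
type index struct {
	name string
	next *index
}

// indexIterator mirrors the C IndexIterator: list mode walks the pNext
// chain, array mode walks a caller-built ordering (the upsert case).
type indexIterator struct {
	ordered []*index // non-nil selects array mode
	cur     *index   // list-mode cursor
	i       int
}

func (it *indexIterator) next() *index {
	if it.ordered != nil {
		if it.i >= len(it.ordered) {
			return nil
		}
		p := it.ordered[it.i]
		it.i++
		return p
	}
	p := it.cur
	if p != nil {
		it.cur = p.next
	}
	return p
}

func main() {
	c := &index{name: "idx_c"}
	b := &index{name: "idx_b", next: c}
	a := &index{name: "idx_a", next: b}

	// Natural list order: a, b, c.
	it := &indexIterator{cur: a}
	for p := it.next(); p != nil; p = it.next() {
		fmt.Println("list:", p.name)
	}

	// ON CONFLICT-imposed order: c first, then the rest.
	it = &indexIterator{ordered: []*index{c, a, b}}
	for p := it.next(); p != nil; p = it.next() {
		fmt.Println("array:", p.name)
	}
}
```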
@@ -122553,7 +123741,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( ){ Vdbe *v; /* VDBE under construction */ Index *pIdx; /* Pointer to one of the indices */ - Index *pPk = 0; /* The PRIMARY KEY index */ + Index *pPk = 0; /* The PRIMARY KEY index for WITHOUT ROWID tables */ sqlite3 *db; /* Database connection */ int i; /* loop counter */ int ix; /* Index loop counter */ @@ -122561,11 +123749,11 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( int onError; /* Conflict resolution strategy */ int seenReplace = 0; /* True if REPLACE is used to resolve INT PK conflict */ int nPkField; /* Number of fields in PRIMARY KEY. 1 for ROWID tables */ - Index *pUpIdx = 0; /* Index to which to apply the upsert */ - u8 isUpdate; /* True if this is an UPDATE operation */ + Upsert *pUpsertClause = 0; /* The specific ON CONFLICT clause for pIdx */ + u8 isUpdate; /* True if this is an UPDATE operation */ u8 bAffinityDone = 0; /* True if the OP_Affinity operation has been run */ - int upsertBypass = 0; /* Address of Goto to bypass upsert subroutine */ - int upsertJump = 0; /* Address of Goto that jumps into upsert subroutine */ + int upsertIpkReturn = 0; /* Address of Goto at end of IPK uniqueness check */ + int upsertIpkDelay = 0; /* Address of Goto to bypass initial IPK check */ int ipkTop = 0; /* Top of the IPK uniqueness check */ int ipkBottom = 0; /* OP_Goto at the end of the IPK uniqueness check */ /* Variables associated with retesting uniqueness constraints after @@ -122575,6 +123763,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( int lblRecheckOk = 0; /* Each recheck jumps to this label if it passes */ Trigger *pTrigger; /* List of DELETE triggers on the table pTab */ int nReplaceTrig = 0; /* Number of replace triggers coded */ + IndexIterator sIdxIter; /* Index iterator */ isUpdate = regOldData!=0; db = pParse->db; @@ -122772,19 +123961,63 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( ** list of indexes attached to a table puts all OE_Replace indexes last ** in the list. See sqlite3CreateIndex() for where that happens. */ - + sIdxIter.eType = 0; + sIdxIter.i = 0; + sIdxIter.u.ax.aIdx = 0; /* Silence harmless compiler warning */ + sIdxIter.u.lx.pIdx = pTab->pIndex; if( pUpsert ){ if( pUpsert->pUpsertTarget==0 ){ - /* An ON CONFLICT DO NOTHING clause, without a constraint-target. - ** Make all unique constraint resolution be OE_Ignore */ - assert( pUpsert->pUpsertSet==0 ); - overrideError = OE_Ignore; - pUpsert = 0; - }else if( (pUpIdx = pUpsert->pUpsertIdx)!=0 ){ - /* If the constraint-target uniqueness check must be run first. - ** Jump to that uniqueness check now */ - upsertJump = sqlite3VdbeAddOp0(v, OP_Goto); - VdbeComment((v, "UPSERT constraint goes first")); + /* There is just one ON CONFLICT clause and it has no constraint-target */ + assert( pUpsert->pNextUpsert==0 ); + if( pUpsert->isDoUpdate==0 ){ + /* A single ON CONFLICT DO NOTHING clause, without a constraint-target. + ** Make all unique constraint resolution be OE_Ignore */ + overrideError = OE_Ignore; + pUpsert = 0; + }else{ + /* A single ON CONFLICT DO UPDATE. Make all resolutions OE_Update */ + overrideError = OE_Update; + } + }else if( pTab->pIndex!=0 ){ + /* Otherwise, we'll need to run the IndexListTerm array version of the + ** iterator to ensure that all of the ON CONFLICT conditions are + ** checked first and in order. 
*/ + int nIdx, jj; + u64 nByte; + Upsert *pTerm; + u8 *bUsed; + for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){ + assert( aRegIdx[nIdx]>0 ); + } + sIdxIter.eType = 1; + sIdxIter.u.ax.nIdx = nIdx; + nByte = (sizeof(IndexListTerm)+1)*nIdx + nIdx; + sIdxIter.u.ax.aIdx = sqlite3DbMallocZero(db, nByte); + if( sIdxIter.u.ax.aIdx==0 ) return; /* OOM */ + bUsed = (u8*)&sIdxIter.u.ax.aIdx[nIdx]; + pUpsert->pToFree = sIdxIter.u.ax.aIdx; + for(i=0, pTerm=pUpsert; pTerm; pTerm=pTerm->pNextUpsert){ + if( pTerm->pUpsertTarget==0 ) break; + if( pTerm->pUpsertIdx==0 ) continue; /* Skip ON CONFLICT for the IPK */ + jj = 0; + pIdx = pTab->pIndex; + while( ALWAYS(pIdx!=0) && pIdx!=pTerm->pUpsertIdx ){ + pIdx = pIdx->pNext; + jj++; + } + if( bUsed[jj] ) continue; /* Duplicate ON CONFLICT clause ignored */ + bUsed[jj] = 1; + sIdxIter.u.ax.aIdx[i].p = pIdx; + sIdxIter.u.ax.aIdx[i].ix = jj; + i++; + } + for(jj=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, jj++){ + if( bUsed[jj] ) continue; + sIdxIter.u.ax.aIdx[i].p = pIdx; + sIdxIter.u.ax.aIdx[i].ix = jj; + i++; + } + assert( i==nIdx ); } } @@ -122847,11 +124080,20 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( } /* figure out whether or not upsert applies in this case */ - if( pUpsert && pUpsert->pUpsertIdx==0 ){ - if( pUpsert->pUpsertSet==0 ){ - onError = OE_Ignore; /* DO NOTHING is the same as INSERT OR IGNORE */ - }else{ - onError = OE_Update; /* DO UPDATE */ + if( pUpsert ){ + pUpsertClause = sqlite3UpsertOfIndex(pUpsert,0); + if( pUpsertClause!=0 ){ + if( pUpsertClause->isDoUpdate==0 ){ + onError = OE_Ignore; /* DO NOTHING is the same as INSERT OR IGNORE */ + }else{ + onError = OE_Update; /* DO UPDATE */ + } + } + if( pUpsertClause!=pUpsert ){ + /* The first ON CONFLICT clause has a conflict target other than + ** the IPK. We have to jump ahead to that first ON CONFLICT clause + ** and then come back here and deal with the IPK afterwards */ + upsertIpkDelay = sqlite3VdbeAddOp0(v, OP_Goto); } } @@ -122861,7 +124103,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( ** the UNIQUE constraints have run. */ if( onError==OE_Replace /* IPK rule is REPLACE */ - && onError!=overrideError /* Rules for other contraints are different */ + && onError!=overrideError /* Rules for other constraints are different */ && pTab->pIndex /* There exist other constraints */ ){ ipkTop = sqlite3VdbeAddOp0(v, OP_Goto)+1; @@ -122958,7 +124200,9 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( } } sqlite3VdbeResolveLabel(v, addrRowidOk); - if( ipkTop ){ + if( pUpsert && pUpsertClause!=pUpsert ){ + upsertIpkReturn = sqlite3VdbeAddOp0(v, OP_Goto); + }else if( ipkTop ){ ipkBottom = sqlite3VdbeAddOp0(v, OP_Goto); sqlite3VdbeJumpHere(v, ipkTop-1); } @@ -122971,7 +124215,10 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( ** This loop also handles the case of the PRIMARY KEY index for a ** WITHOUT ROWID table. 
*/ - for(ix=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, ix++){ + for(pIdx = indexIteratorFirst(&sIdxIter, &ix); + pIdx; + pIdx = indexIteratorNext(&sIdxIter, &ix) + ){ int regIdx; /* Range of registers hold conent for pIdx */ int regR; /* Range of registers holding conflicting PK */ int iThisCur; /* Cursor for this UNIQUE index */ @@ -122979,15 +124226,14 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( int addrConflictCk; /* First opcode in the conflict check logic */ if( aRegIdx[ix]==0 ) continue; /* Skip indices that do not change */ - if( pUpIdx==pIdx ){ - addrUniqueOk = upsertJump+1; - upsertBypass = sqlite3VdbeGoto(v, 0); - VdbeComment((v, "Skip upsert subroutine")); - sqlite3VdbeJumpHere(v, upsertJump); - }else{ - addrUniqueOk = sqlite3VdbeMakeLabel(pParse); + if( pUpsert ){ + pUpsertClause = sqlite3UpsertOfIndex(pUpsert, pIdx); + if( upsertIpkDelay && pUpsertClause==pUpsert ){ + sqlite3VdbeJumpHere(v, upsertIpkDelay); + } } - if( bAffinityDone==0 && (pUpIdx==0 || pUpIdx==pIdx) ){ + addrUniqueOk = sqlite3VdbeMakeLabel(pParse); + if( bAffinityDone==0 ){ sqlite3TableAffinity(v, pTab, regNewData+1); bAffinityDone = 1; } @@ -123058,8 +124304,8 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( } /* Figure out if the upsert clause applies to this index */ - if( pUpIdx==pIdx ){ - if( pUpsert->pUpsertSet==0 ){ + if( pUpsertClause ){ + if( pUpsertClause->isDoUpdate==0 ){ onError = OE_Ignore; /* DO NOTHING is the same as INSERT OR IGNORE */ }else{ onError = OE_Update; /* DO UPDATE */ @@ -123097,7 +124343,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( regIdx, pIdx->nKeyCol); VdbeCoverage(v); /* Generate code to handle collisions */ - regR = (pIdx==pPk) ? regIdx : sqlite3GetTempRange(pParse, nPkField); + regR = pIdx==pPk ? regIdx : sqlite3GetTempRange(pParse, nPkField); if( isUpdate || onError==OE_Replace ){ if( HasRowid(pTab) ){ sqlite3VdbeAddOp2(v, OP_IdxRowid, iThisCur, regR); @@ -123249,13 +124495,16 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( break; } } - if( pUpIdx==pIdx ){ - sqlite3VdbeGoto(v, upsertJump+1); - sqlite3VdbeJumpHere(v, upsertBypass); - }else{ - sqlite3VdbeResolveLabel(v, addrUniqueOk); - } + sqlite3VdbeResolveLabel(v, addrUniqueOk); if( regR!=regIdx ) sqlite3ReleaseTempRange(pParse, regR, nPkField); + if( pUpsertClause + && upsertIpkReturn + && sqlite3UpsertNextIsIPK(pUpsertClause) + ){ + sqlite3VdbeGoto(v, upsertIpkDelay+1); + sqlite3VdbeJumpHere(v, upsertIpkReturn); + upsertIpkReturn = 0; + } } /* If the IPK constraint is a REPLACE, run it last */ @@ -123321,6 +124570,32 @@ SQLITE_PRIVATE void sqlite3SetMakeRecordP5(Vdbe *v, Table *pTab){ } #endif +/* +** Table pTab is a WITHOUT ROWID table that is being written to. The cursor +** number is iCur, and register regData contains the new record for the +** PK index. This function adds code to invoke the pre-update hook, +** if one is registered. 
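Editor's note: `codeWithoutRowidPreupdate()` above factors out the no-op `OP_Insert` whose only job is to fire the pre-update hook for WITHOUT ROWID tables; the xfer-optimization hunks below reuse it. The statements that route through it are ordinary writes to such tables. The hook itself only exists when the amalgamation is built with `SQLITE_ENABLE_PREUPDATE_HOOK` (in go-sqlite3, presumably via its pre-update-hook build tag — an assumption; check the driver docs). A plain illustration of the table shape involved:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// A WITHOUT ROWID table: writes to it go through the PK index directly,
	// which is why the pre-update hook needs the special code path above.
	if _, err := db.Exec(`CREATE TABLE kv(k TEXT PRIMARY KEY, v TEXT) WITHOUT ROWID`); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec(`INSERT INTO kv VALUES ('a', '1')`); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec(`UPDATE kv SET v = '2' WHERE k = 'a'`); err != nil {
		log.Fatal(err)
	}
}
```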
+*/ +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +static void codeWithoutRowidPreupdate( + Parse *pParse, /* Parse context */ + Table *pTab, /* Table being updated */ + int iCur, /* Cursor number for table */ + int regData /* Data containing new record */ +){ + Vdbe *v = pParse->pVdbe; + int r = sqlite3GetTempReg(pParse); + assert( !HasRowid(pTab) ); + assert( 0==(pParse->db->mDbFlags & DBFLAG_Vacuum) || CORRUPT_DB ); + sqlite3VdbeAddOp2(v, OP_Integer, 0, r); + sqlite3VdbeAddOp4(v, OP_Insert, iCur, regData, r, (char*)pTab, P4_TABLE); + sqlite3VdbeChangeP5(v, OPFLAG_ISNOOP); + sqlite3ReleaseTempReg(pParse, r); +} +#else +# define codeWithoutRowidPreupdate(a,b,c,d) +#endif + /* ** This routine generates code to finish the INSERT or UPDATE operation ** that was started by a prior call to sqlite3GenerateConstraintChecks. @@ -123369,17 +124644,9 @@ SQLITE_PRIVATE void sqlite3CompleteInsertion( assert( pParse->nested==0 ); pik_flags |= OPFLAG_NCHANGE; pik_flags |= (update_flags & OPFLAG_SAVEPOSITION); -#ifdef SQLITE_ENABLE_PREUPDATE_HOOK if( update_flags==0 ){ - int r = sqlite3GetTempReg(pParse); - sqlite3VdbeAddOp2(v, OP_Integer, 0, r); - sqlite3VdbeAddOp4(v, OP_Insert, - iIdxCur+i, aRegIdx[i], r, (char*)pTab, P4_TABLE - ); - sqlite3VdbeChangeP5(v, OPFLAG_ISNOOP); - sqlite3ReleaseTempReg(pParse, r); + codeWithoutRowidPreupdate(pParse, pTab, iIdxCur+i, aRegIdx[i]); } -#endif } sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iIdxCur+i, aRegIdx[i], aRegIdx[i]+1, @@ -123577,7 +124844,7 @@ static int xferOptimization( ExprList *pEList; /* The result set of the SELECT */ Table *pSrc; /* The table in the FROM clause of SELECT */ Index *pSrcIdx, *pDestIdx; /* Source and destination indices */ - struct SrcList_item *pItem; /* An element of pSelect->pSrc */ + SrcItem *pItem; /* An element of pSelect->pSrc */ int i; /* Loop counter */ int iDbSrc; /* The database of pSrc */ int iSrc, iDest; /* Cursors from source and destination */ @@ -123794,6 +125061,7 @@ static int xferOptimization( iDest = pParse->nTab++; regAutoinc = autoIncBegin(pParse, iDbDest, pDest); regData = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp2(v, OP_Null, 0, regData); regRowid = sqlite3GetTempReg(pParse); sqlite3OpenTable(pParse, iDest, iDbDest, pDest, OP_OpenWrite); assert( HasRowid(pDest) || destHasUniqueIdx ); @@ -123829,11 +125097,13 @@ static int xferOptimization( emptySrcTest = sqlite3VdbeAddOp2(v, OP_Rewind, iSrc, 0); VdbeCoverage(v); if( pDest->iPKey>=0 ){ addr1 = sqlite3VdbeAddOp2(v, OP_Rowid, iSrc, regRowid); - sqlite3VdbeVerifyAbortable(v, onError); - addr2 = sqlite3VdbeAddOp3(v, OP_NotExists, iDest, 0, regRowid); - VdbeCoverage(v); - sqlite3RowidConstraint(pParse, onError, pDest); - sqlite3VdbeJumpHere(v, addr2); + if( (db->mDbFlags & DBFLAG_Vacuum)==0 ){ + sqlite3VdbeVerifyAbortable(v, onError); + addr2 = sqlite3VdbeAddOp3(v, OP_NotExists, iDest, 0, regRowid); + VdbeCoverage(v); + sqlite3RowidConstraint(pParse, onError, pDest); + sqlite3VdbeJumpHere(v, addr2); + } autoIncStep(pParse, regAutoinc, regRowid); }else if( pDest->pIndex==0 && !(db->mDbFlags & DBFLAG_VacuumInto) ){ addr1 = sqlite3VdbeAddOp2(v, OP_NewRowid, iDest, regRowid); @@ -123841,16 +125111,28 @@ static int xferOptimization( addr1 = sqlite3VdbeAddOp2(v, OP_Rowid, iSrc, regRowid); assert( (pDest->tabFlags & TF_Autoincrement)==0 ); } + if( db->mDbFlags & DBFLAG_Vacuum ){ sqlite3VdbeAddOp1(v, OP_SeekEnd, iDest); - insFlags = OPFLAG_APPEND|OPFLAG_USESEEKRESULT; + insFlags = OPFLAG_APPEND|OPFLAG_USESEEKRESULT|OPFLAG_PREFORMAT; }else{ - insFlags = 
OPFLAG_NCHANGE|OPFLAG_LASTROWID|OPFLAG_APPEND; + insFlags = OPFLAG_NCHANGE|OPFLAG_LASTROWID|OPFLAG_APPEND|OPFLAG_PREFORMAT; + } +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + if( (db->mDbFlags & DBFLAG_Vacuum)==0 ){ + sqlite3VdbeAddOp3(v, OP_RowData, iSrc, regData, 1); + insFlags &= ~OPFLAG_PREFORMAT; + }else +#endif + { + sqlite3VdbeAddOp3(v, OP_RowCell, iDest, iSrc, regRowid); + } + sqlite3VdbeAddOp3(v, OP_Insert, iDest, regData, regRowid); + if( (db->mDbFlags & DBFLAG_Vacuum)==0 ){ + sqlite3VdbeChangeP4(v, -1, (char*)pDest, P4_TABLE); } - sqlite3VdbeAddOp3(v, OP_RowData, iSrc, regData, 1); - sqlite3VdbeAddOp4(v, OP_Insert, iDest, regData, regRowid, - (char*)pDest, P4_TABLE); sqlite3VdbeChangeP5(v, insFlags); + sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Close, iSrc, 0); sqlite3VdbeAddOp2(v, OP_Close, iDest, 0); @@ -123892,13 +125174,22 @@ static int xferOptimization( if( sqlite3_stricmp(sqlite3StrBINARY, zColl) ) break; } if( i==pSrcIdx->nColumn ){ - idxInsFlags = OPFLAG_USESEEKRESULT; + idxInsFlags = OPFLAG_USESEEKRESULT|OPFLAG_PREFORMAT; sqlite3VdbeAddOp1(v, OP_SeekEnd, iDest); + sqlite3VdbeAddOp2(v, OP_RowCell, iDest, iSrc); } }else if( !HasRowid(pSrc) && pDestIdx->idxType==SQLITE_IDXTYPE_PRIMARYKEY ){ idxInsFlags |= OPFLAG_NCHANGE; } - sqlite3VdbeAddOp3(v, OP_RowData, iSrc, regData, 1); + if( idxInsFlags!=(OPFLAG_USESEEKRESULT|OPFLAG_PREFORMAT) ){ + sqlite3VdbeAddOp3(v, OP_RowData, iSrc, regData, 1); + if( (db->mDbFlags & DBFLAG_Vacuum)==0 + && !HasRowid(pDest) + && IsPrimaryKeyIndex(pDestIdx) + ){ + codeWithoutRowidPreupdate(pParse, pDest, iDest, regData); + } + } sqlite3VdbeAddOp2(v, OP_IdxInsert, iDest, regData); sqlite3VdbeChangeP5(v, idxInsFlags|OPFLAG_APPEND); sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1+1); VdbeCoverage(v); @@ -128214,7 +129505,7 @@ SQLITE_PRIVATE void sqlite3Pragma( ** Checkpoint the database. */ case PragTyp_WAL_CHECKPOINT: { - int iBt = (pId2->z?iDb:SQLITE_MAX_ATTACHED); + int iBt = (pId2->z?iDb:SQLITE_MAX_DB); int eMode = SQLITE_CHECKPOINT_PASSIVE; if( zRight ){ if( sqlite3StrICmp(zRight, "full")==0 ){ @@ -128862,7 +130153,7 @@ SQLITE_PRIVATE Module *sqlite3PragmaVtabRegister(sqlite3 *db, const char *zName) */ static void corruptSchema( InitData *pData, /* Initialization context */ - const char *zObj, /* Object being parsed at the point of error */ + char **azObj, /* Type and name of object being parsed */ const char *zExtra /* Error information */ ){ sqlite3 *db = pData->db; @@ -128870,14 +130161,18 @@ static void corruptSchema( pData->rc = SQLITE_NOMEM_BKPT; }else if( pData->pzErrMsg[0]!=0 ){ /* A error message has already been generated. Do not overwrite it */ - }else if( pData->mInitFlags & INITFLAG_AlterTable ){ - *pData->pzErrMsg = sqlite3DbStrDup(db, zExtra); + }else if( pData->mInitFlags & (INITFLAG_AlterRename|INITFLAG_AlterDrop) ){ + *pData->pzErrMsg = sqlite3MPrintf(db, + "error in %s %s after %s: %s", azObj[0], azObj[1], + (pData->mInitFlags & INITFLAG_AlterRename) ? "rename" : "drop column", + zExtra + ); pData->rc = SQLITE_ERROR; }else if( db->flags & SQLITE_WriteSchema ){ pData->rc = SQLITE_CORRUPT_BKPT; }else{ char *z; - if( zObj==0 ) zObj = "?"; + const char *zObj = azObj[1] ? 
azObj[1] : "?"; z = sqlite3MPrintf(db, "malformed database schema (%s)", zObj); if( zExtra && zExtra[0] ) z = sqlite3MPrintf(db, "%z - %s", z, zExtra); *pData->pzErrMsg = z; @@ -128935,19 +130230,26 @@ SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char db->mDbFlags |= DBFLAG_EncodingFixed; pData->nInitRow++; if( db->mallocFailed ){ - corruptSchema(pData, argv[1], 0); + corruptSchema(pData, argv, 0); return 1; } assert( iDb>=0 && iDb<db->nDb ); if( argv==0 ) return 0; /* Might happen if EMPTY_RESULT_CALLBACKS are on */ if( argv[3]==0 ){ - corruptSchema(pData, argv[1], 0); - }else if( sqlite3_strnicmp(argv[4],"create ",7)==0 ){ + corruptSchema(pData, argv, 0); + }else if( argv[4] + && 'c'==sqlite3UpperToLower[(unsigned char)argv[4][0]] + && 'r'==sqlite3UpperToLower[(unsigned char)argv[4][1]] ){ /* Call the parser to process a CREATE TABLE, INDEX or VIEW. ** But because db->init.busy is set to 1, no VDBE code is generated ** or executed. All the parser does is build the internal data ** structures that describe the table, index, or view. + ** + ** No other valid SQL statement, other than the variable CREATE statements, + ** can begin with the letters "C" and "R". Thus, it is not possible run + ** any other kind of statement while parsing the schema, even a corrupt + ** schema. */ int rc; u8 saved_iDb = db->init.iDb; @@ -128960,7 +130262,7 @@ SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char || (db->init.newTnum>pData->mxPage && pData->mxPage>0) ){ if( sqlite3Config.bExtraSchemaChecks ){ - corruptSchema(pData, argv[1], "invalid rootpage"); + corruptSchema(pData, argv, "invalid rootpage"); } } db->init.orphanTrigger = 0; @@ -128979,13 +130281,13 @@ SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char if( rc==SQLITE_NOMEM ){ sqlite3OomFault(db); }else if( rc!=SQLITE_INTERRUPT && (rc&0xFF)!=SQLITE_LOCKED ){ - corruptSchema(pData, argv[1], sqlite3_errmsg(db)); + corruptSchema(pData, argv, sqlite3_errmsg(db)); } } } sqlite3_finalize(pStmt); }else if( argv[1]==0 || (argv[4]!=0 && argv[4][0]!=0) ){ - corruptSchema(pData, argv[1], 0); + corruptSchema(pData, argv, 0); }else{ /* If the SQL column is blank it means this is an index that ** was created to be the PRIMARY KEY or to fulfill a UNIQUE @@ -128996,7 +130298,7 @@ SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char Index *pIndex; pIndex = sqlite3FindIndex(db, argv[1], db->aDb[iDb].zDbSName); if( pIndex==0 ){ - corruptSchema(pData, argv[1], "orphan index"); + corruptSchema(pData, argv, "orphan index"); }else if( sqlite3GetUInt32(argv[3],&pIndex->tnum)==0 || pIndex->tnum<2 @@ -129004,7 +130306,7 @@ SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char || sqlite3IndexHasDuplicateRootPage(pIndex) ){ if( sqlite3Config.bExtraSchemaChecks ){ - corruptSchema(pData, argv[1], "invalid rootpage"); + corruptSchema(pData, argv, "invalid rootpage"); } } } @@ -129384,28 +130686,21 @@ SQLITE_PRIVATE int sqlite3SchemaToIndex(sqlite3 *db, Schema *pSchema){ return i; } -/* -** Deallocate a single AggInfo object -*/ -static void agginfoFree(sqlite3 *db, AggInfo *p){ - sqlite3DbFree(db, p->aCol); - sqlite3DbFree(db, p->aFunc); - sqlite3DbFree(db, p); -} - /* ** Free all memory allocations in the pParse object */ SQLITE_PRIVATE void sqlite3ParserReset(Parse *pParse){ sqlite3 *db = pParse->db; - AggInfo *pThis = pParse->pAggList; - while( pThis ){ - AggInfo *pNext = pThis->pNext; - agginfoFree(db, pThis); - pThis = pNext; + while( 
pParse->pCleanup ){ + ParseCleanup *pCleanup = pParse->pCleanup; + pParse->pCleanup = pCleanup->pNext; + pCleanup->xCleanup(db, pCleanup->pPtr); + sqlite3DbFreeNN(db, pCleanup); } sqlite3DbFree(db, pParse->aLabel); - sqlite3ExprListDelete(db, pParse->pConstExpr); + if( pParse->pConstExpr ){ + sqlite3ExprListDelete(db, pParse->pConstExpr); + } if( db ){ assert( db->lookaside.bDisable >= pParse->disableLookaside ); db->lookaside.bDisable -= pParse->disableLookaside; @@ -129414,6 +130709,55 @@ SQLITE_PRIVATE void sqlite3ParserReset(Parse *pParse){ pParse->disableLookaside = 0; } +/* +** Add a new cleanup operation to a Parser. The cleanup should happen when +** the parser object is destroyed. But, beware: the cleanup might happen +** immediately. +** +** Use this mechanism for uncommon cleanups. There is a higher setup +** cost for this mechanism (an extra malloc), so it should not be used +** for common cleanups that happen on most calls. But for less +** common cleanups, we save a single NULL-pointer comparison in +** sqlite3ParserReset(), which reduces the total CPU cycle count. +** +** If a memory allocation error occurs, then the cleanup happens immediately. +** When either SQLITE_DEBUG or SQLITE_COVERAGE_TEST are defined, the +** pParse->earlyCleanup flag is set in that case. Calling code should verify +** that test cases exist for which this happens, to guard against possible +** use-after-free errors following an OOM. The preferred way to do this is +** to immediately follow the call to this routine with: +** +** testcase( pParse->earlyCleanup ); +** +** This routine returns a copy of its pPtr input (the third parameter) +** except if an early cleanup occurs, in which case it returns NULL. So +** another way to check for early cleanup is to check the return value. +** Or, stop using the pPtr parameter with this call and use only its +** return value thereafter. Something like this: +** +** pObj = sqlite3ParserAddCleanup(pParse, destructor, pObj); +*/ +SQLITE_PRIVATE void *sqlite3ParserAddCleanup( + Parse *pParse, /* Destroy when this Parser finishes */ + void (*xCleanup)(sqlite3*,void*), /* The cleanup routine */ + void *pPtr /* Pointer to object to be cleaned up */ +){ + ParseCleanup *pCleanup = sqlite3DbMallocRaw(pParse->db, sizeof(*pCleanup)); + if( pCleanup ){ + pCleanup->pNext = pParse->pCleanup; + pParse->pCleanup = pCleanup; + pCleanup->pPtr = pPtr; + pCleanup->xCleanup = xCleanup; + }else{ + xCleanup(pParse->db, pPtr); + pPtr = 0; +#if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST) + pParse->earlyCleanup = 1; +#endif + } + return pPtr; +} + /* ** Compile the UTF-8 encoded SQL statement zSql into a statement handle. 
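Editor's note: the LIFO cleanup list that `sqlite3ParserReset()` now drains, and that `sqlite3ParserAddCleanup()` feeds, is a small self-contained idiom. A Go sketch of the same shape, illustrative only (Go has no malloc-failure branch, so the C "early cleanup" case is described in a comment rather than modeled):

```go
package main

import "fmt"

type cleanup struct {
	next *cleanup
	fn   func()
}

type parser struct {
	cleanups *cleanup
}

// addCleanup mirrors sqlite3ParserAddCleanup: register fn to run when the
// parser is reset. In the C code, if allocating the list node fails, the
// cleanup runs immediately instead — the "early cleanup" case.
func (p *parser) addCleanup(fn func()) {
	p.cleanups = &cleanup{next: p.cleanups, fn: fn}
}

// reset mirrors sqlite3ParserReset: run cleanups newest-first, then drop them.
func (p *parser) reset() {
	for c := p.cleanups; c != nil; c = c.next {
		c.fn()
	}
	p.cleanups = nil
}

func main() {
	var p parser
	p.addCleanup(func() { fmt.Println("free table") })
	p.addCleanup(func() { fmt.Println("free WITH clause") })
	p.reset() // prints "free WITH clause", then "free table"
}
```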
*/ @@ -129512,12 +130856,6 @@ static int sqlite3Prepare( } assert( 0==sParse.nQueryLoop ); - if( sParse.rc==SQLITE_DONE ){ - sParse.rc = SQLITE_OK; - } - if( sParse.checkSchema ){ - schemaIsValid(&sParse); - } if( pzTail ){ *pzTail = sParse.zTail; } @@ -129528,20 +130866,28 @@ static int sqlite3Prepare( if( db->mallocFailed ){ sParse.rc = SQLITE_NOMEM_BKPT; } - rc = sParse.rc; - if( rc!=SQLITE_OK ){ - if( sParse.pVdbe ) sqlite3VdbeFinalize(sParse.pVdbe); - assert(!(*ppStmt)); + if( sParse.rc!=SQLITE_OK && sParse.rc!=SQLITE_DONE ){ + if( sParse.checkSchema ){ + schemaIsValid(&sParse); + } + if( sParse.pVdbe ){ + sqlite3VdbeFinalize(sParse.pVdbe); + } + assert( 0==(*ppStmt) ); + rc = sParse.rc; + if( zErrMsg ){ + sqlite3ErrorWithMsg(db, rc, "%s", zErrMsg); + sqlite3DbFree(db, zErrMsg); + }else{ + sqlite3Error(db, rc); + } }else{ + assert( zErrMsg==0 ); *ppStmt = (sqlite3_stmt*)sParse.pVdbe; + rc = SQLITE_OK; + sqlite3ErrorClear(db); } - if( zErrMsg ){ - sqlite3ErrorWithMsg(db, rc, "%s", zErrMsg); - sqlite3DbFree(db, zErrMsg); - }else{ - sqlite3Error(db, rc); - } /* Delete any TriggerPrg structures allocated while parsing this statement. */ while( sParse.pTriggerPrg ){ @@ -129887,12 +131233,16 @@ static void clearSelect(sqlite3 *db, Select *p, int bFree){ sqlite3ExprDelete(db, p->pHaving); sqlite3ExprListDelete(db, p->pOrderBy); sqlite3ExprDelete(db, p->pLimit); + if( OK_IF_ALWAYS_TRUE(p->pWith) ) sqlite3WithDelete(db, p->pWith); #ifndef SQLITE_OMIT_WINDOWFUNC if( OK_IF_ALWAYS_TRUE(p->pWinDefn) ){ sqlite3WindowListDelete(db, p->pWinDefn); } + while( p->pWin ){ + assert( p->pWin->ppThis==&p->pWin ); + sqlite3WindowUnlinkFromSelect(p->pWin); + } #endif - if( OK_IF_ALWAYS_TRUE(p->pWith) ) sqlite3WithDelete(db, p->pWith); if( bFree ) sqlite3DbFreeNN(db, p); p = pPrior; bFree = 1; @@ -130064,7 +131414,7 @@ SQLITE_PRIVATE int sqlite3JoinType(Parse *pParse, Token *pA, Token *pB, Token *p ** Return the index of a column in a table. Return -1 if the column ** is not contained in the table. 
*/ -static int columnIndex(Table *pTab, const char *zCol){ +SQLITE_PRIVATE int sqlite3ColumnIndex(Table *pTab, const char *zCol){ int i; u8 h = sqlite3StrIHash(zCol); Column *pCol; @@ -130096,7 +131446,7 @@ static int tableAndColumnIndex( assert( (piTab==0)==(piCol==0) ); /* Both or neither are NULL */ for(i=0; i<N; i++){ - iCol = columnIndex(pSrc->a[i].pTab, zCol); + iCol = sqlite3ColumnIndex(pSrc->a[i].pTab, zCol); if( iCol>=0 && (bIgnoreHidden==0 || IsHiddenColumn(&pSrc->a[i].pTab->aCol[iCol])==0) ){ @@ -130149,7 +131499,7 @@ static void addWhereTerm( ExprSetProperty(pEq, EP_FromJoin); assert( !ExprHasProperty(pEq, EP_TokenOnly|EP_Reduced) ); ExprSetVVAProperty(pEq, EP_NoReduce); - pEq->iRightJoinTable = (i16)pE2->iTable; + pEq->iRightJoinTable = pE2->iTable; } *ppWhere = sqlite3ExprAnd(pParse, *ppWhere, pEq); } @@ -130185,7 +131535,7 @@ SQLITE_PRIVATE void sqlite3SetJoinExpr(Expr *p, int iTable){ ExprSetProperty(p, EP_FromJoin); assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) ); ExprSetVVAProperty(p, EP_NoReduce); - p->iRightJoinTable = (i16)iTable; + p->iRightJoinTable = iTable; if( p->op==TK_FUNCTION && p->x.pList ){ int i; for(i=0; i<p->x.pList->nExpr; i++){ @@ -130209,6 +131559,9 @@ static void unsetJoinExpr(Expr *p, int iTable){ && (iTable<0 || p->iRightJoinTable==iTable) ){ ExprClearProperty(p, EP_FromJoin); } + if( p->op==TK_COLUMN && p->iTable==iTable ){ + ExprClearProperty(p, EP_CanBeNull); + } if( p->op==TK_FUNCTION && p->x.pList ){ int i; for(i=0; i<p->x.pList->nExpr; i++){ @@ -130237,8 +131590,8 @@ static void unsetJoinExpr(Expr *p, int iTable){ static int sqliteProcessJoin(Parse *pParse, Select *p){ SrcList *pSrc; /* All tables in the FROM clause */ int i, j; /* Loop counters */ - struct SrcList_item *pLeft; /* Left table being joined */ - struct SrcList_item *pRight; /* Right table being joined */ + SrcItem *pLeft; /* Left table being joined */ + SrcItem *pRight; /* Right table being joined */ pSrc = p->pSrc; pLeft = &pSrc->a[0]; @@ -130306,7 +131659,7 @@ static int sqliteProcessJoin(Parse *pParse, Select *p){ int iRightCol; /* Column number of matching column on the right */ zName = pList->a[j].zName; - iRightCol = columnIndex(pRightTab, zName); + iRightCol = sqlite3ColumnIndex(pRightTab, zName); if( iRightCol<0 || !tableAndColumnIndex(pSrc, i+1, zName, &iLeft, &iLeftCol, 0) ){ @@ -131185,7 +132538,7 @@ SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoFromExprList( /* ** Name of the connection operator, used for error messages. */ -static const char *selectOpName(int id){ +SQLITE_PRIVATE const char *sqlite3SelectOpName(int id){ char *z; switch( id ){ case TK_ALL: z = "UNION ALL"; break; @@ -131781,7 +133134,7 @@ SQLITE_PRIVATE int sqlite3ColumnsFromExprList( nCol = pEList->nExpr; aCol = sqlite3DbMallocZero(db, sizeof(aCol[0])*nCol); testcase( aCol==0 ); - if( nCol>32767 ) nCol = 32767; + if( NEVER(nCol>32767) ) nCol = 32767; }else{ nCol = 0; aCol = 0; @@ -131888,6 +133241,7 @@ SQLITE_PRIVATE void sqlite3SelectAddColumnTypeAndCollation( for(i=0, pCol=pTab->aCol; i<pTab->nCol; i++, pCol++){ const char *zType; int n, m; + pTab->tabFlags |= (pCol->colFlags & COLFLAG_NOINSERT); p = a[i].pExpr; zType = columnType(&sNC, p, 0, 0, 0); /* pCol->szEst = ... // Column size est for SELECT tables never used */ @@ -132403,12 +133757,8 @@ static int multiSelect( db = pParse->db; pPrior = p->pPrior; dest = *pDest; - if( pPrior->pOrderBy || pPrior->pLimit ){ - sqlite3ErrorMsg(pParse,"%s clause should come after %s not before", - pPrior->pOrderBy!=0 ? 
"ORDER BY" : "LIMIT", selectOpName(p->op)); - rc = 1; - goto multi_select_end; - } + assert( pPrior->pOrderBy==0 ); + assert( pPrior->pLimit==0 ); v = sqlite3GetVdbe(pParse); assert( v!=0 ); /* The VDBE already created by calling function */ @@ -132465,7 +133815,7 @@ static int multiSelect( pPrior->iOffset = p->iOffset; pPrior->pLimit = p->pLimit; rc = sqlite3Select(pParse, pPrior, &dest); - p->pLimit = 0; + pPrior->pLimit = 0; if( rc ){ goto multi_select_end; } @@ -132486,8 +133836,8 @@ static int multiSelect( pDelete = p->pPrior; p->pPrior = pPrior; p->nSelectRow = sqlite3LogEstAdd(p->nSelectRow, pPrior->nSelectRow); - if( pPrior->pLimit - && sqlite3ExprIsInteger(pPrior->pLimit->pLeft, &nLimit) + if( p->pLimit + && sqlite3ExprIsInteger(p->pLimit->pLeft, &nLimit) && nLimit>0 && p->nSelectRow > sqlite3LogEst((u64)nLimit) ){ p->nSelectRow = sqlite3LogEst((u64)nLimit); @@ -132551,7 +133901,7 @@ static int multiSelect( p->pLimit = 0; uniondest.eDest = op; ExplainQueryPlan((pParse, 1, "%s USING TEMP B-TREE", - selectOpName(p->op))); + sqlite3SelectOpName(p->op))); rc = sqlite3Select(pParse, p, &uniondest); testcase( rc!=SQLITE_OK ); assert( p->pOrderBy==0 ); @@ -132627,7 +133977,7 @@ static int multiSelect( p->pLimit = 0; intersectdest.iSDParm = tab2; ExplainQueryPlan((pParse, 1, "%s USING TEMP B-TREE", - selectOpName(p->op))); + sqlite3SelectOpName(p->op))); rc = sqlite3Select(pParse, p, &intersectdest); testcase( rc!=SQLITE_OK ); pDelete = p->pPrior; @@ -132736,7 +134086,8 @@ SQLITE_PRIVATE void sqlite3SelectWrongNumTermsError(Parse *pParse, Select *p){ sqlite3ErrorMsg(pParse, "all VALUES must have the same number of terms"); }else{ sqlite3ErrorMsg(pParse, "SELECTs to the left and right of %s" - " do not have the same number of result columns", selectOpName(p->op)); + " do not have the same number of result columns", + sqlite3SelectOpName(p->op)); } } @@ -132833,10 +134184,8 @@ static int generateOutputSubroutine( ** if it is the RHS of a row-value IN operator. */ case SRT_Mem: { - if( pParse->nErr==0 ){ - testcase( pIn->nSdst>1 ); - sqlite3ExprCodeMove(pParse, pIn->iSdst, pDest->iSDParm, pIn->nSdst); - } + testcase( pIn->nSdst>1 ); + sqlite3ExprCodeMove(pParse, pIn->iSdst, pDest->iSDParm, pIn->nSdst); /* The LIMIT clause will jump out of the loop for us */ break; } @@ -133128,7 +134477,7 @@ static int multiSelectOrderBy( sqlite3SelectDestInit(&destA, SRT_Coroutine, regAddrA); sqlite3SelectDestInit(&destB, SRT_Coroutine, regAddrB); - ExplainQueryPlan((pParse, 1, "MERGE (%s)", selectOpName(p->op))); + ExplainQueryPlan((pParse, 1, "MERGE (%s)", sqlite3SelectOpName(p->op))); /* Generate a coroutine to evaluate the SELECT statement to the ** left of the compound operator - the "A" select. @@ -133398,7 +134747,7 @@ static void substSelect( int doPrior /* Do substitutes on p->pPrior too */ ){ SrcList *pSrc; - struct SrcList_item *pItem; + SrcItem *pItem; int i; if( !p ) return; do{ @@ -133428,7 +134777,7 @@ static void substSelect( ** pSrcItem->colUsed mask. 
*/ static int recomputeColumnsUsedExpr(Walker *pWalker, Expr *pExpr){ - struct SrcList_item *pItem; + SrcItem *pItem; if( pExpr->op!=TK_COLUMN ) return WRC_Continue; pItem = pWalker->u.pSrcItem; if( pItem->iCursor!=pExpr->iTable ) return WRC_Continue; @@ -133438,7 +134787,7 @@ static int recomputeColumnsUsedExpr(Walker *pWalker, Expr *pExpr){ } static void recomputeColumnsUsed( Select *pSelect, /* The complete SELECT statement */ - struct SrcList_item *pSrcItem /* Which FROM clause item to recompute */ + SrcItem *pSrcItem /* Which FROM clause item to recompute */ ){ Walker w; if( NEVER(pSrcItem->pTab==0) ) return; @@ -133451,6 +134800,89 @@ static void recomputeColumnsUsed( } #endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ +#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) +/* +** Assign new cursor numbers to each of the items in pSrc. For each +** new cursor number assigned, set an entry in the aCsrMap[] array +** to map the old cursor number to the new: +** +** aCsrMap[iOld] = iNew; +** +** The array is guaranteed by the caller to be large enough for all +** existing cursor numbers in pSrc. +** +** If pSrc contains any sub-selects, call this routine recursively +** on the FROM clause of each such sub-select, with iExcept set to -1. +*/ +static void srclistRenumberCursors( + Parse *pParse, /* Parse context */ + int *aCsrMap, /* Array to store cursor mappings in */ + SrcList *pSrc, /* FROM clause to renumber */ + int iExcept /* FROM clause item to skip */ +){ + int i; + SrcItem *pItem; + for(i=0, pItem=pSrc->a; i<pSrc->nSrc; i++, pItem++){ + if( i!=iExcept ){ + Select *p; + pItem->iCursor = aCsrMap[pItem->iCursor] = pParse->nTab++; + for(p=pItem->pSelect; p; p=p->pPrior){ + srclistRenumberCursors(pParse, aCsrMap, p->pSrc, -1); + } + } + } +} + +/* +** Expression walker callback used by renumberCursors() to update +** Expr objects to match newly assigned cursor numbers. +*/ +static int renumberCursorsCb(Walker *pWalker, Expr *pExpr){ + int *aCsrMap = pWalker->u.aiCol; + int op = pExpr->op; + if( (op==TK_COLUMN || op==TK_IF_NULL_ROW) && aCsrMap[pExpr->iTable] ){ + pExpr->iTable = aCsrMap[pExpr->iTable]; + } + if( ExprHasProperty(pExpr, EP_FromJoin) && aCsrMap[pExpr->iRightJoinTable] ){ + pExpr->iRightJoinTable = aCsrMap[pExpr->iRightJoinTable]; + } + return WRC_Continue; +} + +/* +** Assign a new cursor number to each cursor in the FROM clause (Select.pSrc) +** of the SELECT statement passed as the second argument, and to each +** cursor in the FROM clause of any FROM clause sub-selects, recursively. +** Except, do not assign a new cursor number to the iExcept'th element in +** the FROM clause of (*p). Update all expressions and other references +** to refer to the new cursor numbers. +** +** Argument aCsrMap is an array that may be used for temporary working +** space. Two guarantees are made by the caller: +** +** * the array is larger than the largest cursor number used within the +** select statement passed as an argument, and +** +** * the array entries for all cursor numbers that do *not* appear in +** FROM clauses of the select statement as described above are +** initialized to zero. 
+*/ +static void renumberCursors( + Parse *pParse, /* Parse context */ + Select *p, /* Select to renumber cursors within */ + int iExcept, /* FROM clause item to skip */ + int *aCsrMap /* Working space */ +){ + Walker w; + srclistRenumberCursors(pParse, aCsrMap, p->pSrc, iExcept); + memset(&w, 0, sizeof(w)); + w.u.aiCol = aCsrMap; + w.xExprCallback = renumberCursorsCb; + w.xSelectCallback = sqlite3SelectWalkNoop; + sqlite3WalkSelect(&w, p); +} +#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ + #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) /* ** This routine attempts to flatten subqueries as a performance optimization. @@ -133545,9 +134977,9 @@ static void recomputeColumnsUsed( ** (17c) every term within the subquery compound must have a FROM clause ** (17d) the outer query may not be ** (17d1) aggregate, or -** (17d2) DISTINCT, or -** (17d3) a join. -** (17e) the subquery may not contain window functions +** (17d2) DISTINCT +** (17e) the subquery may not contain window functions, and +** (17f) the subquery must not be the RHS of a LEFT JOIN. ** ** The parent and sub-query may contain WHERE clauses. Subject to ** rules (11), (13) and (14), they may also contain ORDER BY, @@ -133563,8 +134995,8 @@ static void recomputeColumnsUsed( ** syntax error and return a detailed message. ** ** (18) If the sub-query is a compound select, then all terms of the -** ORDER BY clause of the parent must be simple references to -** columns of the sub-query. +** ORDER BY clause of the parent must be copies of a term returned +** by the parent query. ** ** (19) If the subquery uses LIMIT then the outer query may not ** have a WHERE clause. @@ -133580,9 +135012,8 @@ static void recomputeColumnsUsed( ** ** (22) The subquery may not be a recursive CTE. ** -** (**) Subsumed into restriction (17d3). Was: If the outer query is -** a recursive CTE, then the sub-query may not be a compound query. -** This restriction is because transforming the +** (23) If the outer query is a recursive CTE, then the sub-query may not be +** a compound query. This restriction is because transforming the ** parent to a compound query confuses the code that handles ** recursive queries in multiSelect(). ** @@ -133624,9 +135055,10 @@ static int flattenSubquery( int isLeftJoin = 0; /* True if pSub is the right side of a LEFT JOIN */ int i; /* Loop counter */ Expr *pWhere; /* The WHERE clause */ - struct SrcList_item *pSubitem; /* The subquery */ + SrcItem *pSubitem; /* The subquery */ sqlite3 *db = pParse->db; Walker w; /* Walker to persist agginfo data */ + int *aCsrMap = 0; /* Check to see if flattening is permitted. Return 0 if not. 
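Editor's note: with `renumberCursors()` in place, a compound sub-select can be flattened even when it is one term of a join (the old restriction 17d3), because each duplicated copy of the parent query gets fresh, non-colliding cursor numbers. Whether a given query actually got flattened is observable with `EXPLAIN QUERY PLAN`; a sketch with invented tables:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE t1(a); CREATE TABLE t2(a); CREATE TABLE t3(a);`); err != nil {
		log.Fatal(err)
	}

	// A UNION ALL subquery joined against another table: flattenable here
	// provided the compound terms satisfy restrictions (17a)-(17f) above.
	rows, err := db.Query(`EXPLAIN QUERY PLAN
	  SELECT * FROM (SELECT a FROM t1 UNION ALL SELECT a FROM t2) x, t3
	  WHERE x.a = t3.a`)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var id, parent, notused int
		var detail string
		if err := rows.Scan(&id, &parent, &notused, &detail); err != nil {
			log.Fatal(err)
		}
		fmt.Println(detail) // no MATERIALIZE/CO-ROUTINE line for x if flattened
	}
}
```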
*/ @@ -133722,13 +135154,14 @@ static int flattenSubquery( if( pSub->pOrderBy ){ return 0; /* Restriction (20) */ } - if( isAgg || (p->selFlags & SF_Distinct)!=0 || pSrc->nSrc!=1 ){ - return 0; /* (17d1), (17d2), or (17d3) */ + if( isAgg || (p->selFlags & SF_Distinct)!=0 || isLeftJoin>0 ){ + return 0; /* (17d1), (17d2), or (17f) */ } for(pSub1=pSub; pSub1; pSub1=pSub1->pPrior){ testcase( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct ); testcase( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))==SF_Aggregate ); assert( pSub->pSrc!=0 ); + assert( (pSub->selFlags & SF_Recursive)==0 ); assert( pSub->pEList->nExpr==pSub1->pEList->nExpr ); if( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))!=0 /* (17b) */ || (pSub1->pPrior && pSub1->op!=TK_ALL) /* (17a) */ @@ -133749,15 +135182,15 @@ static int flattenSubquery( if( p->pOrderBy->a[ii].u.x.iOrderByCol==0 ) return 0; } } - } - /* Ex-restriction (23): - ** The only way that the recursive part of a CTE can contain a compound - ** subquery is for the subquery to be one term of a join. But if the - ** subquery is a join, then the flattening has already been stopped by - ** restriction (17d3) - */ - assert( (p->selFlags & SF_Recursive)==0 || pSub->pPrior==0 ); + /* Restriction (23) */ + if( (p->selFlags & SF_Recursive) ) return 0; + + if( pSrc->nSrc>1 ){ + if( pParse->nSelect>500 ) return 0; + aCsrMap = sqlite3DbMallocZero(db, pParse->nTab*sizeof(int)); + } + } /***** If we reach this point, flattening is permitted. *****/ SELECTTRACE(1,pParse,p,("flatten %u.%p from term %d\n", @@ -133769,6 +135202,17 @@ static int flattenSubquery( testcase( i==SQLITE_DENY ); pParse->zAuthContext = zSavedAuthContext; + /* Delete the transient structures associated with thesubquery */ + pSub1 = pSubitem->pSelect; + sqlite3DbFree(db, pSubitem->zDatabase); + sqlite3DbFree(db, pSubitem->zName); + sqlite3DbFree(db, pSubitem->zAlias); + pSubitem->zDatabase = 0; + pSubitem->zName = 0; + pSubitem->zAlias = 0; + pSubitem->pSelect = 0; + assert( pSubitem->pOn==0 ); + /* If the sub-query is a compound SELECT statement, then (by restrictions ** 17 and 18 above) it must be a UNION ALL and the parent query must ** be of the form: @@ -133807,18 +135251,23 @@ static int flattenSubquery( ExprList *pOrderBy = p->pOrderBy; Expr *pLimit = p->pLimit; Select *pPrior = p->pPrior; + Table *pItemTab = pSubitem->pTab; + pSubitem->pTab = 0; p->pOrderBy = 0; - p->pSrc = 0; p->pPrior = 0; p->pLimit = 0; pNew = sqlite3SelectDup(db, p, 0); p->pLimit = pLimit; p->pOrderBy = pOrderBy; - p->pSrc = pSrc; p->op = TK_ALL; + pSubitem->pTab = pItemTab; if( pNew==0 ){ p->pPrior = pPrior; }else{ + pNew->selId = ++pParse->nSelect; + if( aCsrMap && db->mallocFailed==0 ){ + renumberCursors(pParse, pNew, iFrom, aCsrMap); + } pNew->pPrior = pPrior; if( pPrior ) pPrior->pNext = pNew; pNew->pNext = p; @@ -133826,24 +135275,13 @@ static int flattenSubquery( SELECTTRACE(2,pParse,p,("compound-subquery flattener" " creates %u as peer\n",pNew->selId)); } - if( db->mallocFailed ) return 1; + assert( pSubitem->pSelect==0 ); + } + sqlite3DbFree(db, aCsrMap); + if( db->mallocFailed ){ + pSubitem->pSelect = pSub1; + return 1; } - - /* Begin flattening the iFrom-th entry of the FROM clause - ** in the outer query. 
- */ - pSub = pSub1 = pSubitem->pSelect; - - /* Delete the transient table structure associated with the - ** subquery - */ - sqlite3DbFree(db, pSubitem->zDatabase); - sqlite3DbFree(db, pSubitem->zName); - sqlite3DbFree(db, pSubitem->zAlias); - pSubitem->zDatabase = 0; - pSubitem->zName = 0; - pSubitem->zAlias = 0; - pSubitem->pSelect = 0; /* Defer deleting the Table object associated with the ** subquery until code generation is @@ -133856,8 +135294,10 @@ static int flattenSubquery( Table *pTabToDel = pSubitem->pTab; if( pTabToDel->nTabRef==1 ){ Parse *pToplevel = sqlite3ParseToplevel(pParse); - pTabToDel->pNextZombie = pToplevel->pZombieTab; - pToplevel->pZombieTab = pTabToDel; + sqlite3ParserAddCleanup(pToplevel, + (void(*)(sqlite3*,void*))sqlite3DeleteTable, + pTabToDel); + testcase( pToplevel->earlyCleanup ); }else{ pTabToDel->nTabRef--; } @@ -133877,6 +135317,7 @@ static int flattenSubquery( ** those references with expressions that resolve to the subquery FROM ** elements we are now copying in. */ + pSub = pSub1; for(pParent=p; pParent; pParent=pParent->pPrior, pSub=pSub->pPrior){ int nSubSrc; u8 jointype = 0; @@ -133885,14 +135326,8 @@ static int flattenSubquery( nSubSrc = pSubSrc->nSrc; /* Number of terms in subquery FROM clause */ pSrc = pParent->pSrc; /* FROM clause of the outer query */ - if( pSrc ){ - assert( pParent==p ); /* First time through the loop */ - jointype = pSubitem->fg.jointype; - }else{ - assert( pParent!=p ); /* 2nd and subsequent times through the loop */ - pSrc = sqlite3SrcListAppend(pParse, 0, 0, 0); - if( pSrc==0 ) break; - pParent->pSrc = pSrc; + if( pParent==p ){ + jointype = pSubitem->fg.jointype; /* First time through the loop */ } /* The subquery uses a single slot of the FROM clause of the outer @@ -134012,7 +135447,7 @@ static int flattenSubquery( sqlite3SelectDelete(db, pSub1); #if SELECTTRACE_ENABLED - if( sqlite3_unsupported_selecttrace & 0x100 ){ + if( sqlite3SelectTrace & 0x100 ){ SELECTTRACE(0x100,pParse,p,("After flattening:\n")); sqlite3TreeViewSelect(0, p, 0); } @@ -134206,6 +135641,35 @@ static int propagateConstants( return nChng; } +#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) +# if !defined(SQLITE_OMIT_WINDOWFUNC) +/* +** This function is called to determine whether or not it is safe to +** push WHERE clause expression pExpr down to FROM clause sub-query +** pSubq, which contains at least one window function. Return 1 +** if it is safe and the expression should be pushed down, or 0 +** otherwise. +** +** It is only safe to push the expression down if it consists only +** of constants and copies of expressions that appear in the PARTITION +** BY clause of all window function used by the sub-query. It is safe +** to filter out entire partitions, but not rows within partitions, as +** this may change the results of the window functions. +** +** At the time this function is called it is guaranteed that +** +** * the sub-query uses only one distinct window frame, and +** * that the window frame has a PARTITION BY clase. 
+*/ +static int pushDownWindowCheck(Parse *pParse, Select *pSubq, Expr *pExpr){ + assert( pSubq->pWin->pPartition ); + assert( (pSubq->selFlags & SF_MultiPart)==0 ); + assert( pSubq->pPrior==0 ); + return sqlite3ExprIsConstantOrGroupBy(pParse, pExpr, pSubq->pWin->pPartition); +} +# endif /* SQLITE_OMIT_WINDOWFUNC */ +#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ + #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) /* ** Make copies of relevant WHERE clause terms of the outer query into @@ -134253,9 +135717,24 @@ static int propagateConstants( ** But if the (b2=2) term were to be pushed down into the bb subquery, ** then the (1,1,NULL) row would be suppressed. ** -** (6) The inner query features one or more window-functions (since -** changes to the WHERE clause of the inner query could change the -** window over which window functions are calculated). +** (6) Window functions make things tricky as changes to the WHERE clause +** of the inner query could change the window over which window +** functions are calculated. Therefore, do not attempt the optimization +** if: +** +** (6a) The inner query uses multiple incompatible window partitions. +** +** (6b) The inner query is a compound and uses window-functions. +** +** (6c) The WHERE clause does not consist entirely of constants and +** copies of expressions found in the PARTITION BY clause of +** all window-functions used by the sub-query. It is safe to +** filter out entire partitions, as this does not change the +** window over which any window-function is calculated. +** +** (7) The inner query is a Common Table Expression (CTE) that should +** be materialized. (This restriction is implemented in the calling +** routine.) ** ** Return 0 if no changes are made and non-zero if one or more WHERE clause ** terms are duplicated into the subquery. 
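Editor's note: restriction (6c) above is the subtle one — a WHERE term may be pushed into a window-function subquery only if it is built from constants and the PARTITION BY columns, because discarding whole partitions cannot change any surviving partition's internal numbering. A sketch of the safe and unsafe cases (schema invented):

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE emp(dept TEXT, name TEXT, salary INT);
	  INSERT INTO emp VALUES ('eng','a',10),('eng','b',20),('ops','c',30);`); err != nil {
		log.Fatal(err)
	}

	// dept is the PARTITION BY column, so "WHERE dept='eng'" may be pushed
	// into the subquery: it only removes whole partitions. A filter on rnk
	// could not be pushed, since it would change what rank() computes.
	rows, err := db.Query(`
	  SELECT name, rnk FROM (
	    SELECT dept, name,
	           rank() OVER (PARTITION BY dept ORDER BY salary DESC) AS rnk
	    FROM emp
	  ) WHERE dept = 'eng'`)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var name string
		var rnk int
		if err := rows.Scan(&name, &rnk); err != nil {
			log.Fatal(err)
		}
		fmt.Println(name, rnk)
	}
}
```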
@@ -134269,13 +135748,17 @@ static int pushDownWhereTerms( ){ Expr *pNew; int nChng = 0; - Select *pSel; if( pWhere==0 ) return 0; - if( pSubq->selFlags & SF_Recursive ) return 0; /* restriction (2) */ + if( pSubq->selFlags & (SF_Recursive|SF_MultiPart) ) return 0; #ifndef SQLITE_OMIT_WINDOWFUNC - for(pSel=pSubq; pSel; pSel=pSel->pPrior){ - if( pSel->pWin ) return 0; /* restriction (6) */ + if( pSubq->pPrior ){ + Select *pSel; + for(pSel=pSubq; pSel; pSel=pSel->pPrior){ + if( pSel->pWin ) return 0; /* restriction (6b) */ + } + }else{ + if( pSubq->pWin && pSubq->pWin->pPartition==0 ) return 0; } #endif @@ -134311,6 +135794,7 @@ static int pushDownWhereTerms( } if( sqlite3ExprIsTableConstant(pWhere, iCursor) ){ nChng++; + pSubq->selFlags |= SF_PushDown; while( pSubq ){ SubstContext x; pNew = sqlite3ExprDup(pParse->db, pWhere, 0); @@ -134321,6 +135805,14 @@ static int pushDownWhereTerms( x.isLeftJoin = 0; x.pEList = pSubq->pEList; pNew = substExpr(&x, pNew); +#ifndef SQLITE_OMIT_WINDOWFUNC + if( pSubq->pWin && 0==pushDownWindowCheck(pParse, pSubq, pNew) ){ + /* Restriction 6c has prevented push-down in this case */ + sqlite3ExprDelete(pParse->db, pNew); + nChng--; + break; + } +#endif if( pSubq->selFlags & SF_Aggregate ){ pSubq->pHaving = sqlite3ExprAnd(pParse, pSubq->pHaving, pNew); }else{ @@ -134359,7 +135851,11 @@ static u8 minMaxQuery(sqlite3 *db, Expr *pFunc, ExprList **ppMinMax){ assert( *ppMinMax==0 ); assert( pFunc->op==TK_AGG_FUNCTION ); assert( !IsWindowFunc(pFunc) ); - if( pEList==0 || pEList->nExpr!=1 || ExprHasProperty(pFunc, EP_WinFunc) ){ + if( pEList==0 + || pEList->nExpr!=1 + || ExprHasProperty(pFunc, EP_WinFunc) + || OptimizationDisabled(db, SQLITE_MinMaxOpt) + ){ return eRet; } zFunc = pFunc->u.zToken; @@ -134422,24 +135918,26 @@ static Table *isSimpleCount(Select *p, AggInfo *pAggInfo){ ** SQLITE_ERROR and leave an error in pParse. Otherwise, populate ** pFrom->pIndex and return SQLITE_OK. */ -SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *pParse, struct SrcList_item *pFrom){ - if( pFrom->pTab && pFrom->fg.isIndexedBy ){ - Table *pTab = pFrom->pTab; - char *zIndexedBy = pFrom->u1.zIndexedBy; - Index *pIdx; - for(pIdx=pTab->pIndex; - pIdx && sqlite3StrICmp(pIdx->zName, zIndexedBy); - pIdx=pIdx->pNext - ); - if( !pIdx ){ - sqlite3ErrorMsg(pParse, "no such index: %s", zIndexedBy, 0); - pParse->checkSchema = 1; - return SQLITE_ERROR; - } - pFrom->pIBIndex = pIdx; +SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *pParse, SrcItem *pFrom){ + Table *pTab = pFrom->pTab; + char *zIndexedBy = pFrom->u1.zIndexedBy; + Index *pIdx; + assert( pTab!=0 ); + assert( pFrom->fg.isIndexedBy!=0 ); + + for(pIdx=pTab->pIndex; + pIdx && sqlite3StrICmp(pIdx->zName, zIndexedBy); + pIdx=pIdx->pNext + ); + if( !pIdx ){ + sqlite3ErrorMsg(pParse, "no such index: %s", zIndexedBy, 0); + pParse->checkSchema = 1; + return SQLITE_ERROR; } + pFrom->u2.pIBIndex = pIdx; return SQLITE_OK; } + /* ** Detect compound SELECT statements that use an ORDER BY clause with ** an alternative collating sequence. @@ -134526,7 +136024,7 @@ static int convertCompoundSelectToSubquery(Walker *pWalker, Select *p){ ** arguments. If it does, leave an error message in pParse and return ** non-zero, since pFrom is not allowed to be a table-valued function. 
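Editor's note: `cannotBeFunction()` in the next hunk is the check behind the "'%s' is not a function" error — function-call syntax in a FROM clause is legal only for table-valued functions, such as the built-in `pragma_*` tables. A quick illustration of both sides:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE t(a INT, b TEXT)`); err != nil {
		log.Fatal(err)
	}

	// A genuine table-valued function: the PRAGMA table interface.
	rows, err := db.Query(`SELECT name, type FROM pragma_table_info('t')`)
	if err != nil {
		log.Fatal(err)
	}
	for rows.Next() {
		var name, typ string
		if err := rows.Scan(&name, &typ); err != nil {
			log.Fatal(err)
		}
		fmt.Println(name, typ)
	}
	rows.Close()

	// Function syntax on an ordinary table trips cannotBeFunction().
	if _, err := db.Query(`SELECT * FROM t(1)`); err != nil {
		fmt.Println(err) // "'t' is not a function"
	}
}
```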
*/ -static int cannotBeFunction(Parse *pParse, struct SrcList_item *pFrom){ +static int cannotBeFunction(Parse *pParse, SrcItem *pFrom){ if( pFrom->fg.isTabFunc ){ sqlite3ErrorMsg(pParse, "'%s' is not a function", pFrom->zName); return 1; @@ -134547,19 +136045,19 @@ static int cannotBeFunction(Parse *pParse, struct SrcList_item *pFrom){ */ static struct Cte *searchWith( With *pWith, /* Current innermost WITH clause */ - struct SrcList_item *pItem, /* FROM clause element to resolve */ + SrcItem *pItem, /* FROM clause element to resolve */ With **ppContext /* OUT: WITH clause return value belongs to */ ){ - const char *zName; - if( pItem->zDatabase==0 && (zName = pItem->zName)!=0 ){ - With *p; - for(p=pWith; p; p=p->pOuter){ - int i; - for(i=0; i<p->nCte; i++){ - if( sqlite3StrICmp(zName, p->a[i].zName)==0 ){ - *ppContext = p; - return &p->a[i]; - } + const char *zName = pItem->zName; + With *p; + assert( pItem->zDatabase==0 ); + assert( zName!=0 ); + for(p=pWith; p; p=p->pOuter){ + int i; + for(i=0; i<p->nCte; i++){ + if( sqlite3StrICmp(zName, p->a[i].zName)==0 ){ + *ppContext = p; + return &p->a[i]; } } } @@ -134577,46 +136075,54 @@ static struct Cte *searchWith( ** statement with which it is associated. */ SQLITE_PRIVATE void sqlite3WithPush(Parse *pParse, With *pWith, u8 bFree){ - assert( bFree==0 || (pParse->pWith==0 && pParse->pWithToFree==0) ); if( pWith ){ assert( pParse->pWith!=pWith ); pWith->pOuter = pParse->pWith; pParse->pWith = pWith; - if( bFree ) pParse->pWithToFree = pWith; + if( bFree ){ + sqlite3ParserAddCleanup(pParse, + (void(*)(sqlite3*,void*))sqlite3WithDelete, + pWith); + testcase( pParse->earlyCleanup ); + } } } /* ** This function checks if argument pFrom refers to a CTE declared by -** a WITH clause on the stack currently maintained by the parser. And, -** if currently processing a CTE expression, if it is a recursive -** reference to the current CTE. +** a WITH clause on the stack currently maintained by the parser (on the +** pParse->pWith linked list). And if currently processing a CTE +** expression, this routine checks to see if the reference is +** a recursive reference to the CTE. ** -** If pFrom falls into either of the two categories above, pFrom->pTab -** and other fields are populated accordingly. The caller should check -** (pFrom->pTab!=0) to determine whether or not a successful match -** was found. +** If pFrom matches a CTE according to either of the two categories above, +** pFrom->pTab and other fields are populated accordingly. ** -** Whether or not a match is found, SQLITE_OK is returned if no error -** occurs. If an error does occur, an error message is stored in the -** parser and some error code other than SQLITE_OK returned. +** Return 0 if no match is found. +** Return 1 if a match is found. +** Return 2 if an error condition is detected. */ -static int withExpand( - Walker *pWalker, - struct SrcList_item *pFrom +static int resolveFromTermToCte( + Parse *pParse, /* The parsing context */ + Walker *pWalker, /* Current tree walker */ + SrcItem *pFrom /* The FROM clause term to check */ ){ - Parse *pParse = pWalker->pParse; - sqlite3 *db = pParse->db; - struct Cte *pCte; /* Matched CTE (or NULL if no match) */ - With *pWith; /* WITH clause that pCte belongs to */ + Cte *pCte; /* Matched CTE (or NULL if no match) */ + With *pWith; /* The matching WITH */ assert( pFrom->pTab==0 ); - if( pParse->nErr ){ - return SQLITE_ERROR; + if( pParse->pWith==0 ){ + /* There are no WITH clauses in the stack.
No match is possible */ + return 0; + } + if( pFrom->zDatabase!=0 ){ + /* The FROM term contains a schema qualifier (ex: main.t1) and so + ** it cannot possibly be a CTE reference. */ + return 0; } - pCte = searchWith(pParse->pWith, pFrom, &pWith); if( pCte ){ + sqlite3 *db = pParse->db; Table *pTab; ExprList *pEList; Select *pSel; @@ -134625,6 +136131,7 @@ static int withExpand( int bMayRecursive; /* True if compound joined by UNION [ALL] */ With *pSavedWith; /* Initial value of pParse->pWith */ int iRecTab = -1; /* Cursor for recursive table */ + CteUse *pCteUse; /* If pCte->zCteErr is non-NULL at this point, then this is an illegal ** recursive reference to CTE pCte. Leave an error in pParse and return @@ -134632,21 +136139,39 @@ static int withExpand( ** In this case, proceed. */ if( pCte->zCteErr ){ sqlite3ErrorMsg(pParse, pCte->zCteErr, pCte->zName); - return SQLITE_ERROR; + return 2; } - if( cannotBeFunction(pParse, pFrom) ) return SQLITE_ERROR; + if( cannotBeFunction(pParse, pFrom) ) return 2; assert( pFrom->pTab==0 ); - pFrom->pTab = pTab = sqlite3DbMallocZero(db, sizeof(Table)); - if( pTab==0 ) return WRC_Abort; + pTab = sqlite3DbMallocZero(db, sizeof(Table)); + if( pTab==0 ) return 2; + pCteUse = pCte->pUse; + if( pCteUse==0 ){ + pCte->pUse = pCteUse = sqlite3DbMallocZero(db, sizeof(pCteUse[0])); + if( pCteUse==0 + || sqlite3ParserAddCleanup(pParse,sqlite3DbFree,pCteUse)==0 + ){ + sqlite3DbFree(db, pTab); + return 2; + } + pCteUse->eM10d = pCte->eM10d; + } + pFrom->pTab = pTab; pTab->nTabRef = 1; pTab->zName = sqlite3DbStrDup(db, pCte->zName); pTab->iPKey = -1; pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); pTab->tabFlags |= TF_Ephemeral | TF_NoVisibleRowid; pFrom->pSelect = sqlite3SelectDup(db, pCte->pSelect, 0); - if( db->mallocFailed ) return SQLITE_NOMEM_BKPT; + if( db->mallocFailed ) return 2; assert( pFrom->pSelect ); + pFrom->fg.isCte = 1; + pFrom->u2.pCteUse = pCteUse; + pCteUse->nUse++; + if( pCteUse->nUse>=2 && pCteUse->eM10d==M10d_Any ){ + pCteUse->eM10d = M10d_Yes; + } /* Check if this is a recursive CTE. 
*/ pRecTerm = pSel = pFrom->pSelect; @@ -134656,7 +136181,7 @@ static int withExpand( SrcList *pSrc = pRecTerm->pSrc; assert( pRecTerm->pPrior!=0 ); for(i=0; i<pSrc->nSrc; i++){ - struct SrcList_item *pItem = &pSrc->a[i]; + SrcItem *pItem = &pSrc->a[i]; if( pItem->zDatabase==0 && pItem->zName!=0 && 0==sqlite3StrICmp(pItem->zName, pCte->zName) @@ -134668,7 +136193,7 @@ static int withExpand( sqlite3ErrorMsg(pParse, "multiple references to recursive table: %s", pCte->zName ); - return SQLITE_ERROR; + return 2; } pRecTerm->selFlags |= SF_Recursive; if( iRecTab<0 ) iRecTab = pParse->nTab++; @@ -134683,16 +136208,24 @@ static int withExpand( pSavedWith = pParse->pWith; pParse->pWith = pWith; if( pSel->selFlags & SF_Recursive ){ + int rc; assert( pRecTerm!=0 ); assert( (pRecTerm->selFlags & SF_Recursive)==0 ); assert( pRecTerm->pNext!=0 ); assert( (pRecTerm->pNext->selFlags & SF_Recursive)!=0 ); assert( pRecTerm->pWith==0 ); pRecTerm->pWith = pSel->pWith; - sqlite3WalkSelect(pWalker, pRecTerm); + rc = sqlite3WalkSelect(pWalker, pRecTerm); pRecTerm->pWith = 0; + if( rc ){ + pParse->pWith = pSavedWith; + return 2; + } }else{ - sqlite3WalkSelect(pWalker, pSel); + if( sqlite3WalkSelect(pWalker, pSel) ){ + pParse->pWith = pSavedWith; + return 2; + } } pParse->pWith = pWith; @@ -134704,7 +136237,7 @@ static int withExpand( pCte->zName, pEList->nExpr, pCte->pCols->nExpr ); pParse->pWith = pSavedWith; - return SQLITE_ERROR; + return 2; } pEList = pCte->pCols; } @@ -134720,9 +136253,9 @@ static int withExpand( } pCte->zCteErr = 0; pParse->pWith = pSavedWith; + return 1; /* Success */ } - - return SQLITE_OK; + return 0; /* No match */ } #endif @@ -134756,7 +136289,7 @@ static void selectPopWith(Walker *pWalker, Select *p){ ** SQLITE_OK is returned. Otherwise, if an OOM error is encountered, ** SQLITE_NOMEM. 
*/ -SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse *pParse, struct SrcList_item *pFrom){ +SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse *pParse, SrcItem *pFrom){ Select *pSel = pFrom->pSelect; Table *pTab; @@ -134804,10 +136337,10 @@ SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse *pParse, struct SrcList_item *pFr */ static int selectExpander(Walker *pWalker, Select *p){ Parse *pParse = pWalker->pParse; - int i, j, k; + int i, j, k, rc; SrcList *pTabList; ExprList *pEList; - struct SrcList_item *pFrom; + SrcItem *pFrom; sqlite3 *db = pParse->db; Expr *pE, *pRight, *pExpr; u16 selFlags = p->selFlags; @@ -134843,10 +136376,6 @@ static int selectExpander(Walker *pWalker, Select *p){ assert( pFrom->fg.isRecursive==0 || pFrom->pTab!=0 ); if( pFrom->pTab ) continue; assert( pFrom->fg.isRecursive==0 ); -#ifndef SQLITE_OMIT_CTE - if( withExpand(pWalker, pFrom) ) return WRC_Abort; - if( pFrom->pTab ) {} else -#endif if( pFrom->zName==0 ){ #ifndef SQLITE_OMIT_SUBQUERY Select *pSel = pFrom->pSelect; @@ -134855,6 +136384,12 @@ static int selectExpander(Walker *pWalker, Select *p){ assert( pFrom->pTab==0 ); if( sqlite3WalkSelect(pWalker, pSel) ) return WRC_Abort; if( sqlite3ExpandSubquery(pParse, pFrom) ) return WRC_Abort; +#endif +#ifndef SQLITE_OMIT_CTE + }else if( (rc = resolveFromTermToCte(pParse, pWalker, pFrom))!=0 ){ + if( rc>1 ) return WRC_Abort; + pTab = pFrom->pTab; + assert( pTab!=0 ); #endif }else{ /* An ordinary table or view name in the FROM clause */ @@ -134877,7 +136412,10 @@ static int selectExpander(Walker *pWalker, Select *p){ u8 eCodeOrig = pWalker->eCode; if( sqlite3ViewGetColumnNames(pParse, pTab) ) return WRC_Abort; assert( pFrom->pSelect==0 ); - if( pTab->pSelect && (db->flags & SQLITE_EnableView)==0 ){ + if( pTab->pSelect + && (db->flags & SQLITE_EnableView)==0 + && pTab->pSchema!=db->aDb[1].pSchema + ){ sqlite3ErrorMsg(pParse, "access to view \"%s\" prohibited", pTab->zName); } @@ -134903,7 +136441,7 @@ static int selectExpander(Walker *pWalker, Select *p){ } /* Locate the index named by the INDEXED BY clause, if any. */ - if( sqlite3IndexedByLookup(pParse, pFrom) ){ + if( pFrom->fg.isIndexedBy && sqlite3IndexedByLookup(pParse, pFrom) ){ return WRC_Abort; } } @@ -135146,7 +136684,7 @@ static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){ Parse *pParse; int i; SrcList *pTabList; - struct SrcList_item *pFrom; + SrcItem *pFrom; assert( p->selFlags & SF_Resolved ); if( p->selFlags & SF_HasTypeInfo ) return; @@ -135417,7 +136955,9 @@ static void explainSimpleCount( static int havingToWhereExprCb(Walker *pWalker, Expr *pExpr){ if( pExpr->op!=TK_AND ){ Select *pS = pWalker->u.pSelect; - if( sqlite3ExprIsConstantOrGroupBy(pWalker->pParse, pExpr, pS->pGroupBy) ){ + if( sqlite3ExprIsConstantOrGroupBy(pWalker->pParse, pExpr, pS->pGroupBy) + && ExprAlwaysFalse(pExpr)==0 + ){ sqlite3 *db = pWalker->pParse->db; Expr *pNew = sqlite3Expr(db, TK_INTEGER, "1"); if( pNew ){ @@ -135456,7 +136996,7 @@ static void havingToWhere(Parse *pParse, Select *p){ sWalker.u.pSelect = p; sqlite3WalkExpr(&sWalker, p->pHaving); #if SELECTTRACE_ENABLED - if( sWalker.eCode && (sqlite3_unsupported_selecttrace & 0x100)!=0 ){ + if( sWalker.eCode && (sqlite3SelectTrace & 0x100)!=0 ){ SELECTTRACE(0x100,pParse,p,("Move HAVING terms into WHERE:\n")); sqlite3TreeViewSelect(0, p, 0); } @@ -135468,11 +137008,13 @@ static void havingToWhere(Parse *pParse, Select *p){ ** If it is, then return the SrcList_item for the prior view. If it is not, ** then return 0. 
*/ -static struct SrcList_item *isSelfJoinView( +static SrcItem *isSelfJoinView( SrcList *pTabList, /* Search for self-joins in this FROM clause */ - struct SrcList_item *pThis /* Search for prior reference to this subquery */ + SrcItem *pThis /* Search for prior reference to this subquery */ ){ - struct SrcList_item *pItem; + SrcItem *pItem; + assert( pThis->pSelect!=0 ); + if( pThis->pSelect->selFlags & SF_PushDown ) return 0; for(pItem = pTabList->a; pItem<pThis; pItem++){ Select *pS1; if( pItem->pSelect==0 ) continue; @@ -135488,9 +137030,7 @@ static struct SrcList_item *isSelfJoinView( ** names in the same FROM clause. */ continue; } - if( sqlite3ExprCompare(0, pThis->pSelect->pWhere, pS1->pWhere, -1) - || sqlite3ExprCompare(0, pThis->pSelect->pHaving, pS1->pHaving, -1) - ){ + if( pItem->pSelect->selFlags & SF_PushDown ){ /* The view was modified by some other optimization such as ** pushDownWhereTerms() */ continue; @@ -135500,6 +137040,15 @@ static struct SrcList_item *isSelfJoinView( return 0; } +/* +** Deallocate a single AggInfo object +*/ +static void agginfoFree(sqlite3 *db, AggInfo *p){ + sqlite3DbFree(db, p->aCol); + sqlite3DbFree(db, p->aFunc); + sqlite3DbFreeNN(db, p); +} + #ifdef SQLITE_COUNTOFVIEW_OPTIMIZATION /* ** Attempt to transform a query of the form @@ -135578,7 +137127,7 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ p->selFlags &= ~SF_Aggregate; #if SELECTTRACE_ENABLED - if( sqlite3_unsupported_selecttrace & 0x400 ){ + if( sqlite3SelectTrace & 0x400 ){ SELECTTRACE(0x400,pParse,p,("After count-of-view optimization:\n")); sqlite3TreeViewSelect(0, p, 0); } @@ -135631,7 +137180,7 @@ SQLITE_PRIVATE int sqlite3Select( if( sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0) ) return 1; #if SELECTTRACE_ENABLED SELECTTRACE(1,pParse,p, ("begin processing:\n", pParse->addrExplain)); - if( sqlite3_unsupported_selecttrace & 0x100 ){ + if( sqlite3SelectTrace & 0x100 ){ sqlite3TreeViewSelect(0, p, 0); } #endif @@ -135645,8 +137194,19 @@ SQLITE_PRIVATE int sqlite3Select( pDest->eDest==SRT_Except || pDest->eDest==SRT_Discard || pDest->eDest==SRT_DistQueue || pDest->eDest==SRT_DistFifo ); /* All of these destinations are also able to ignore the ORDER BY clause */ - sqlite3ExprListDelete(db, p->pOrderBy); - p->pOrderBy = 0; + if( p->pOrderBy ){ +#if SELECTTRACE_ENABLED + SELECTTRACE(1,pParse,p, ("dropping superfluous ORDER BY:\n")); + if( sqlite3SelectTrace & 0x100 ){ + sqlite3TreeViewExprList(0, p->pOrderBy, 0, "ORDERBY"); + } +#endif + sqlite3ParserAddCleanup(pParse, + (void(*)(sqlite3*,void*))sqlite3ExprListDelete, + p->pOrderBy); + testcase( pParse->earlyCleanup ); + p->pOrderBy = 0; + } p->selFlags &= ~SF_Distinct; p->selFlags |= SF_NoopOrderBy; } @@ -135656,7 +137216,7 @@ SQLITE_PRIVATE int sqlite3Select( } assert( p->pEList!=0 ); #if SELECTTRACE_ENABLED - if( sqlite3_unsupported_selecttrace & 0x104 ){ + if( sqlite3SelectTrace & 0x104 ){ SELECTTRACE(0x104,pParse,p, ("after name resolution:\n")); sqlite3TreeViewSelect(0, p, 0); } @@ -135667,9 +137227,9 @@ SQLITE_PRIVATE int sqlite3Select( ** In this case, it is an error if the target object (pSrc->a[0]) name ** or alias is duplicated within FROM clause (pSrc->a[1..n]). 
*/ if( p->selFlags & SF_UpdateFrom ){ - struct SrcList_item *p0 = &p->pSrc->a[0]; + SrcItem *p0 = &p->pSrc->a[0]; for(i=1; i<p->pSrc->nSrc; i++){ - struct SrcList_item *p1 = &p->pSrc->a[i]; + SrcItem *p1 = &p->pSrc->a[i]; if( p0->pTab==p1->pTab && 0==sqlite3_stricmp(p0->zAlias, p1->zAlias) ){ sqlite3ErrorMsg(pParse, "target object/alias may not appear in FROM clause: %s", @@ -135691,7 +137251,7 @@ SQLITE_PRIVATE int sqlite3Select( goto select_end; } #if SELECTTRACE_ENABLED - if( p->pWin && (sqlite3_unsupported_selecttrace & 0x108)!=0 ){ + if( p->pWin && (sqlite3SelectTrace & 0x108)!=0 ){ SELECTTRACE(0x104,pParse,p, ("after window rewrite:\n")); sqlite3TreeViewSelect(0, p, 0); } @@ -135707,7 +137267,7 @@ SQLITE_PRIVATE int sqlite3Select( */ #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) for(i=0; !p->pPrior && i<pTabList->nSrc; i++){ - struct SrcList_item *pItem = &pTabList->a[i]; + SrcItem *pItem = &pTabList->a[i]; Select *pSub = pItem->pSelect; Table *pTab = pItem->pTab; @@ -135798,7 +137358,7 @@ SQLITE_PRIVATE int sqlite3Select( rc = multiSelect(pParse, p, pDest); #if SELECTTRACE_ENABLED SELECTTRACE(0x1,pParse,p,("end compound-select processing\n")); - if( (sqlite3_unsupported_selecttrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){ + if( (sqlite3SelectTrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){ sqlite3TreeViewSelect(0, p, 0); } #endif @@ -135817,7 +137377,7 @@ SQLITE_PRIVATE int sqlite3Select( && propagateConstants(pParse, p) ){ #if SELECTTRACE_ENABLED - if( sqlite3_unsupported_selecttrace & 0x100 ){ + if( sqlite3SelectTrace & 0x100 ){ SELECTTRACE(0x100,pParse,p,("After constant propagation:\n")); sqlite3TreeViewSelect(0, p, 0); } @@ -135841,7 +137401,8 @@ SQLITE_PRIVATE int sqlite3Select( ** (2) Generate code for all sub-queries */ for(i=0; i<pTabList->nSrc; i++){ - struct SrcList_item *pItem = &pTabList->a[i]; + SrcItem *pItem = &pTabList->a[i]; + SrcItem *pPrior; SelectDest dest; Select *pSub; #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) @@ -135901,16 +137462,18 @@ SQLITE_PRIVATE int sqlite3Select( ** inside the subquery. This can help the subquery to run more efficiently. */ if( OptimizationEnabled(db, SQLITE_PushDown) + && (pItem->fg.isCte==0 || pItem->u2.pCteUse->eM10d!=M10d_Yes) && pushDownWhereTerms(pParse, pSub, p->pWhere, pItem->iCursor, (pItem->fg.jointype & JT_OUTER)!=0) ){ #if SELECTTRACE_ENABLED - if( sqlite3_unsupported_selecttrace & 0x100 ){ + if( sqlite3SelectTrace & 0x100 ){ SELECTTRACE(0x100,pParse,p, ("After WHERE-clause push-down into subquery %d:\n", pSub->selId)); sqlite3TreeViewSelect(0, p, 0); } #endif + assert( pItem->pSelect && (pItem->pSelect->selFlags & SF_PushDown)!=0 ); }else{ SELECTTRACE(0x100,pParse,p,("Push-down not possible\n")); } @@ -135920,16 +137483,18 @@ SQLITE_PRIVATE int sqlite3Select( /* Generate code to implement the subquery ** - ** The subquery is implemented as a co-routine if the subquery is - ** guaranteed to be the outer loop (so that it does not need to be - ** computed more than once) + ** The subquery is implemented as a co-routine if: + ** (1) the subquery is guaranteed to be the outer loop (so that + ** it does not need to be computed more than once), and + ** (2) the subquery is not a CTE that should be materialized ** - ** TODO: Are there other reasons beside (1) to use a co-routine + ** TODO: Are there other reasons beside (1) and (2) to use a co-routine ** implementation? 
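+ ** A hedged illustration of how the new CteUse bookkeeping feeds
+ ** condition (2) (hypothetical CTE names; the heuristic may evolve):
+ **
+ **     WITH c(x) AS (SELECT 1) SELECT x FROM c;
+ **         -- single use: eM10d stays M10d_Any; a co-routine is allowed
+ **
+ **     WITH c(x) AS (SELECT 1) SELECT * FROM c, c AS c2;
+ **         -- two uses: eM10d becomes M10d_Yes, so c is materialized
+ **         -- once and the second cursor is opened with OP_OpenDup
+ **
+ **     WITH c(x) AS MATERIALIZED (SELECT 1) SELECT x FROM c;
+ **         -- the MATERIALIZED hint forces M10d_Yes even for one use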
*/ if( i==0 && (pTabList->nSrc==1 || (pTabList->a[1].fg.jointype&(JT_LEFT|JT_CROSS))!=0) /* (1) */ + && (pItem->fg.isCte==0 || pItem->u2.pCteUse->eM10d!=M10d_Yes) /* (2) */ ){ /* Implement a co-routine that will return a single row of the result ** set on each invocation. @@ -135949,16 +137514,32 @@ sqlite3VdbeEndCoroutine(v, pItem->regReturn); sqlite3VdbeJumpHere(v, addrTop-1); sqlite3ClearTempRegCache(pParse); + }else if( pItem->fg.isCte && pItem->u2.pCteUse->addrM9e>0 ){ + /* This is a CTE for which materialization code has already been + ** generated. Invoke the subroutine to compute the materialization, + ** then make pItem->iCursor a copy of the ephemeral table that + ** holds the result of the materialization. */ + CteUse *pCteUse = pItem->u2.pCteUse; + sqlite3VdbeAddOp2(v, OP_Gosub, pCteUse->regRtn, pCteUse->addrM9e); + if( pItem->iCursor!=pCteUse->iCur ){ + sqlite3VdbeAddOp2(v, OP_OpenDup, pItem->iCursor, pCteUse->iCur); + } + pSub->nSelectRow = pCteUse->nRowEst; + }else if( (pPrior = isSelfJoinView(pTabList, pItem))!=0 ){ + /* This view has already been materialized by a prior entry in + ** this same FROM clause. Reuse it. */ + if( pPrior->addrFillSub ){ + sqlite3VdbeAddOp2(v, OP_Gosub, pPrior->regReturn, pPrior->addrFillSub); + } + sqlite3VdbeAddOp2(v, OP_OpenDup, pItem->iCursor, pPrior->iCursor); + pSub->nSelectRow = pPrior->pSelect->nSelectRow; }else{ - /* Generate a subroutine that will fill an ephemeral table with - ** the content of this subquery. pItem->addrFillSub will point - ** to the address of the generated subroutine. pItem->regReturn - ** is a register allocated to hold the subroutine return address - */ + /* Materialize the view. If the view is not correlated, generate a + ** subroutine to do the materialization so that subsequent uses of + ** the same view can reuse the materialization.
*/ int topAddr; int onceAddr = 0; int retAddr; - struct SrcList_item *pPrior; testcase( pItem->addrFillSub==0 ); /* Ticket c52b09c7f38903b1311 */ pItem->regReturn = ++pParse->nMem; @@ -135973,22 +137554,22 @@ SQLITE_PRIVATE int sqlite3Select( }else{ VdbeNoopComment((v, "materialize \"%s\"", pItem->pTab->zName)); } - pPrior = isSelfJoinView(pTabList, pItem); - if( pPrior ){ - sqlite3VdbeAddOp2(v, OP_OpenDup, pItem->iCursor, pPrior->iCursor); - assert( pPrior->pSelect!=0 ); - pSub->nSelectRow = pPrior->pSelect->nSelectRow; - }else{ - sqlite3SelectDestInit(&dest, SRT_EphemTab, pItem->iCursor); - ExplainQueryPlan((pParse, 1, "MATERIALIZE %u", pSub->selId)); - sqlite3Select(pParse, pSub, &dest); - } + sqlite3SelectDestInit(&dest, SRT_EphemTab, pItem->iCursor); + ExplainQueryPlan((pParse, 1, "MATERIALIZE %u", pSub->selId)); + sqlite3Select(pParse, pSub, &dest); pItem->pTab->nRowLogEst = pSub->nSelectRow; if( onceAddr ) sqlite3VdbeJumpHere(v, onceAddr); retAddr = sqlite3VdbeAddOp1(v, OP_Return, pItem->regReturn); VdbeComment((v, "end %s", pItem->pTab->zName)); sqlite3VdbeChangeP1(v, topAddr, retAddr); sqlite3ClearTempRegCache(pParse); + if( pItem->fg.isCte && pItem->fg.isCorrelated==0 ){ + CteUse *pCteUse = pItem->u2.pCteUse; + pCteUse->addrM9e = pItem->addrFillSub; + pCteUse->regRtn = pItem->regReturn; + pCteUse->iCur = pItem->iCursor; + pCteUse->nRowEst = pSub->nSelectRow; + } } if( db->mallocFailed ) goto select_end; pParse->nHeight -= sqlite3SelectExprHeight(p); @@ -136005,7 +137586,7 @@ SQLITE_PRIVATE int sqlite3Select( sDistinct.isTnct = (p->selFlags & SF_Distinct)!=0; #if SELECTTRACE_ENABLED - if( sqlite3_unsupported_selecttrace & 0x400 ){ + if( sqlite3SelectTrace & 0x400 ){ SELECTTRACE(0x400,pParse,p,("After all FROM-clause analysis:\n")); sqlite3TreeViewSelect(0, p, 0); } @@ -136041,7 +137622,7 @@ SQLITE_PRIVATE int sqlite3Select( assert( sDistinct.isTnct ); #if SELECTTRACE_ENABLED - if( sqlite3_unsupported_selecttrace & 0x400 ){ + if( sqlite3SelectTrace & 0x400 ){ SELECTTRACE(0x400,pParse,p,("Transform DISTINCT into GROUP BY:\n")); sqlite3TreeViewSelect(0, p, 0); } @@ -136133,6 +137714,7 @@ SQLITE_PRIVATE int sqlite3Select( sSort.pOrderBy = 0; } } + SELECTTRACE(1,pParse,p,("WhereBegin returns\n")); /* If sorting index that was created by a prior OP_OpenEphemeral ** instruction ended up not being needed, then change the OP_OpenEphemeral @@ -136171,6 +137753,7 @@ SQLITE_PRIVATE int sqlite3Select( /* End the database scan loop. */ + SELECTTRACE(1,pParse,p,("WhereEnd\n")); sqlite3WhereEnd(pWInfo); } }else{ @@ -136241,11 +137824,14 @@ SQLITE_PRIVATE int sqlite3Select( ** SELECT statement. 
*/ pAggInfo = sqlite3DbMallocZero(db, sizeof(*pAggInfo) ); - if( pAggInfo==0 ){ + if( pAggInfo ){ + sqlite3ParserAddCleanup(pParse, + (void(*)(sqlite3*,void*))agginfoFree, pAggInfo); + testcase( pParse->earlyCleanup ); + } + if( db->mallocFailed ){ goto select_end; } - pAggInfo->pNext = pParse->pAggList; - pParse->pAggList = pAggInfo; pAggInfo->selId = p->selId; memset(&sNC, 0, sizeof(sNC)); sNC.pParse = pParse; @@ -136289,10 +137875,14 @@ SQLITE_PRIVATE int sqlite3Select( pAggInfo->mxReg = pParse->nMem; if( db->mallocFailed ) goto select_end; #if SELECTTRACE_ENABLED - if( sqlite3_unsupported_selecttrace & 0x400 ){ + if( sqlite3SelectTrace & 0x400 ){ int ii; SELECTTRACE(0x400,pParse,p,("After aggregate analysis %p:\n", pAggInfo)); sqlite3TreeViewSelect(0, p, 0); + if( minMaxFlag ){ + sqlite3DebugPrintf("MIN/MAX Optimization (0x%02x) adds:\n", minMaxFlag); + sqlite3TreeViewExprList(0, pMinMaxOrderBy, 0, "ORDERBY"); + } for(ii=0; ii<pAggInfo->nColumn; ii++){ sqlite3DebugPrintf("agg-column[%d] iMem=%d\n", ii, pAggInfo->aCol[ii].iMem); @@ -136360,6 +137950,7 @@ SQLITE_PRIVATE int sqlite3Select( WHERE_GROUPBY | (orderByGrp ? WHERE_SORTBYGROUP : 0), 0 ); if( pWInfo==0 ) goto select_end; + SELECTTRACE(1,pParse,p,("WhereBegin returns\n")); if( sqlite3WhereIsOrdered(pWInfo)==pGroupBy->nExpr ){ /* The optimizer is able to deliver rows in group by order so ** we do not have to sort. The OP_OpenEphemeral table will be @@ -136408,6 +137999,7 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3VdbeAddOp2(v, OP_SorterInsert, pAggInfo->sortingIdx, regRecord); sqlite3ReleaseTempReg(pParse, regRecord); sqlite3ReleaseTempRange(pParse, regBase, nCol); + SELECTTRACE(1,pParse,p,("WhereEnd\n")); sqlite3WhereEnd(pWInfo); pAggInfo->sortingIdxPTab = sortPTab = pParse->nTab++; sortOut = sqlite3GetTempReg(pParse); @@ -136482,9 +138074,10 @@ SQLITE_PRIVATE int sqlite3Select( /* End of the loop */ if( groupBySort ){ - sqlite3VdbeAddOp2(v, OP_SorterNext, pAggInfo->sortingIdx, addrTopOfLoop); + sqlite3VdbeAddOp2(v, OP_SorterNext, pAggInfo->sortingIdx,addrTopOfLoop); VdbeCoverage(v); }else{ + SELECTTRACE(1,pParse,p,("WhereEnd\n")); sqlite3WhereEnd(pWInfo); sqlite3VdbeChangeToNoop(v, addrSortingIdx); } @@ -136594,7 +138187,6 @@ SQLITE_PRIVATE int sqlite3Select( explainSimpleCount(pParse, pTab, pBest); }else{ int regAcc = 0; /* "populate accumulators" flag */ - int addrSkip; /* If there are accumulator registers but no min() or max() functions ** without FILTER clauses, allocate register regAcc. 
Register regAcc @@ -136641,12 +138233,13 @@ if( pWInfo==0 ){ goto select_end; } + SELECTTRACE(1,pParse,p,("WhereBegin returns\n")); updateAccumulator(pParse, regAcc, pAggInfo); if( regAcc ) sqlite3VdbeAddOp2(v, OP_Integer, 1, regAcc); - addrSkip = sqlite3WhereOrderByLimitOptLabel(pWInfo); - if( addrSkip!=sqlite3WhereContinueLabel(pWInfo) ){ - sqlite3VdbeGoto(v, addrSkip); + if( minMaxFlag ){ + sqlite3WhereMinMaxOptEarlyOut(v, pWInfo); } + SELECTTRACE(1,pParse,p,("WhereEnd\n")); sqlite3WhereEnd(pWInfo); finalizeAggFunctions(pParse, pAggInfo); } @@ -136691,15 +138284,13 @@ select_end: if( pAggInfo && !db->mallocFailed ){ for(i=0; i<pAggInfo->nColumn; i++){ Expr *pExpr = pAggInfo->aCol[i].pCExpr; - assert( pExpr!=0 || db->mallocFailed ); - if( pExpr==0 ) continue; + assert( pExpr!=0 ); assert( pExpr->pAggInfo==pAggInfo ); assert( pExpr->iAgg==i ); } for(i=0; i<pAggInfo->nFunc; i++){ Expr *pExpr = pAggInfo->aFunc[i].pFExpr; - assert( pExpr!=0 || db->mallocFailed ); - if( pExpr==0 ) continue; + assert( pExpr!=0 ); assert( pExpr->pAggInfo==pAggInfo ); assert( pExpr->iAgg==i ); } @@ -136708,7 +138299,7 @@ select_end: #if SELECTTRACE_ENABLED SELECTTRACE(0x1,pParse,p,("end processing\n")); - if( (sqlite3_unsupported_selecttrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){ + if( (sqlite3SelectTrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){ sqlite3TreeViewSelect(0, p, 0); } #endif @@ -136969,28 +138560,39 @@ SQLITE_PRIVATE void sqlite3DeleteTriggerStep(sqlite3 *db, TriggerStep *pTriggerS ** pTab as well as the triggers listed in pTab->pTrigger. */ SQLITE_PRIVATE Trigger *sqlite3TriggerList(Parse *pParse, Table *pTab){ - Schema * const pTmpSchema = pParse->db->aDb[1].pSchema; - Trigger *pList = 0; /* List of triggers to return */ + Schema *pTmpSchema; /* Schema of the pTab table */ + Trigger *pList; /* List of triggers to return */ + HashElem *p; /* Loop variable for TEMP triggers */ if( pParse->disableTriggers ){ return 0; } - + pTmpSchema = pParse->db->aDb[1].pSchema; + p = sqliteHashFirst(&pTmpSchema->trigHash); + if( p==0 ){ + return pTab->pTrigger; + } + pList = pTab->pTrigger; if( pTmpSchema!=pTab->pSchema ){ - HashElem *p; - assert( sqlite3SchemaMutexHeld(pParse->db, 0, pTmpSchema) ); - for(p=sqliteHashFirst(&pTmpSchema->trigHash); p; p=sqliteHashNext(p)){ + while( p ){ Trigger *pTrig = (Trigger *)sqliteHashData(p); if( pTrig->pTabSchema==pTab->pSchema && 0==sqlite3StrICmp(pTrig->table, pTab->zName) ){ - pTrig->pNext = (pList ? pList : pTab->pTrigger); + pTrig->pNext = pList; + pList = pTrig; + }else if( pTrig->op==TK_RETURNING ){ + assert( pParse->bReturning ); + assert( &(pParse->u1.pReturning->retTrig) == pTrig ); + pTrig->table = pTab->zName; + pTrig->pTabSchema = pTab->pSchema; + pTrig->pNext = pList; pList = pTrig; } + p = sqliteHashNext(p); } } - - return (pList ?
pList : pTab->pTrigger); + return pList; } /* @@ -137266,7 +138868,7 @@ SQLITE_PRIVATE void sqlite3FinishTrigger( sqlite3DbFree(db, z); sqlite3ChangeCookie(pParse, iDb); sqlite3VdbeAddParseSchemaOp(v, iDb, - sqlite3MPrintf(db, "type='trigger' AND name='%q'", zName)); + sqlite3MPrintf(db, "type='trigger' AND name='%q'", zName), 0); } if( db->init.busy ){ @@ -137479,7 +139081,7 @@ SQLITE_PRIVATE TriggerStep *sqlite3TriggerDeleteStep( ** Recursively delete a Trigger structure */ SQLITE_PRIVATE void sqlite3DeleteTrigger(sqlite3 *db, Trigger *pTrigger){ - if( pTrigger==0 ) return; + if( pTrigger==0 || pTrigger->bReturning ) return; sqlite3DeleteTriggerStep(db, pTrigger->step_list); sqlite3DbFree(db, pTrigger->zName); sqlite3DbFree(db, pTrigger->table); @@ -137644,15 +139246,53 @@ SQLITE_PRIVATE Trigger *sqlite3TriggersExist( Trigger *pList = 0; Trigger *p; - if( (pParse->db->flags & SQLITE_EnableTrigger)!=0 ){ - pList = sqlite3TriggerList(pParse, pTab); - } - assert( pList==0 || IsVirtual(pTab)==0 ); - for(p=pList; p; p=p->pNext){ - if( p->op==op && checkColumnOverlap(p->pColumns, pChanges) ){ - mask |= p->tr_tm; + pList = sqlite3TriggerList(pParse, pTab); + assert( pList==0 || IsVirtual(pTab)==0 + || (pList->bReturning && pList->pNext==0) ); + if( pList!=0 ){ + p = pList; + if( (pParse->db->flags & SQLITE_EnableTrigger)==0 + && pTab->pTrigger!=0 + ){ + /* The SQLITE_DBCONFIG_ENABLE_TRIGGER setting is off. That means that + ** only TEMP triggers are allowed. Truncate the pList so that it + ** includes only TEMP triggers */ + if( pList==pTab->pTrigger ){ + pList = 0; + goto exit_triggers_exist; + } + while( ALWAYS(p->pNext) && p->pNext!=pTab->pTrigger ) p = p->pNext; + p->pNext = 0; + p = pList; } + do{ + if( p->op==op && checkColumnOverlap(p->pColumns, pChanges) ){ + mask |= p->tr_tm; + }else if( p->op==TK_RETURNING ){ + /* The first time a RETURNING trigger is seen, the "op" value tells + ** us what type of trigger it should be. */ + assert( sqlite3IsToplevel(pParse) ); + p->op = op; + if( IsVirtual(pTab) ){ + if( op!=TK_INSERT ){ + sqlite3ErrorMsg(pParse, + "%s RETURNING is not available on virtual tables", + op==TK_DELETE ? "DELETE" : "UPDATE"); + } + p->tr_tm = TRIGGER_BEFORE; + }else{ + p->tr_tm = TRIGGER_AFTER; + } + mask |= p->tr_tm; + }else if( p->bReturning && p->op==TK_INSERT && op==TK_UPDATE + && sqlite3IsToplevel(pParse) ){ + /* Also fire a RETURNING trigger for an UPSERT */ + mask |= p->tr_tm; + } + p = p->pNext; + }while( p ); } +exit_triggers_exist: if( pMask ){ *pMask = mask; } @@ -137695,6 +139335,131 @@ SQLITE_PRIVATE SrcList *sqlite3TriggerStepSrc( return pSrc; } +/* +** Return true if the pExpr term from the RETURNING clause argument +** list is of the form "*". Raise an error if the term is of the +** form "table.*". +*/ +static int isAsteriskTerm( + Parse *pParse, /* Parsing context */ + Expr *pTerm /* A term in the RETURNING clause */ +){ + assert( pTerm!=0 ); + if( pTerm->op==TK_ASTERISK ) return 1; + if( pTerm->op!=TK_DOT ) return 0; + assert( pTerm->pRight!=0 ); + assert( pTerm->pLeft!=0 ); + if( pTerm->pRight->op!=TK_ASTERISK ) return 0; + sqlite3ErrorMsg(pParse, "RETURNING may not use \"TABLE.*\" wildcards"); + return 1; +} + +/* The input list pList is the list of result set terms from a RETURNING +** clause. The table that we are returning from is pTab. +** +** This routine makes a copy of the pList, and at the same time expands +** any "*" wildcards to be the complete set of columns from pTab.
+*/ +static ExprList *sqlite3ExpandReturning( + Parse *pParse, /* Parsing context */ + ExprList *pList, /* The arguments to RETURNING */ + Table *pTab /* The table being updated */ +){ + ExprList *pNew = 0; + sqlite3 *db = pParse->db; + int i; + + for(i=0; i<pList->nExpr; i++){ + Expr *pOldExpr = pList->a[i].pExpr; + if( NEVER(pOldExpr==0) ) continue; + if( isAsteriskTerm(pParse, pOldExpr) ){ + int jj; + for(jj=0; jj<pTab->nCol; jj++){ + Expr *pNewExpr; + if( IsHiddenColumn(pTab->aCol+jj) ) continue; + pNewExpr = sqlite3Expr(db, TK_ID, pTab->aCol[jj].zName); + pNew = sqlite3ExprListAppend(pParse, pNew, pNewExpr); + if( !db->mallocFailed ){ + struct ExprList_item *pItem = &pNew->a[pNew->nExpr-1]; + pItem->zEName = sqlite3DbStrDup(db, pTab->aCol[jj].zName); + pItem->eEName = ENAME_NAME; + } + } + }else{ + Expr *pNewExpr = sqlite3ExprDup(db, pOldExpr, 0); + pNew = sqlite3ExprListAppend(pParse, pNew, pNewExpr); + if( !db->mallocFailed && ALWAYS(pList->a[i].zEName!=0) ){ + struct ExprList_item *pItem = &pNew->a[pNew->nExpr-1]; + pItem->zEName = sqlite3DbStrDup(db, pList->a[i].zEName); + pItem->eEName = pList->a[i].eEName; + } + } + } + if( !db->mallocFailed ){ + Vdbe *v = pParse->pVdbe; + assert( v!=0 ); + sqlite3VdbeSetNumCols(v, pNew->nExpr); + for(i=0; i<pNew->nExpr; i++){ + sqlite3VdbeSetColName(v, i, COLNAME_NAME, pNew->a[i].zEName, + SQLITE_TRANSIENT); + } + } + return pNew; +} + +/* +** Generate code for the RETURNING trigger. Unlike other triggers +** that invoke a subprogram in the bytecode, the code for RETURNING +** is generated in-line. +*/ +static void codeReturningTrigger( + Parse *pParse, /* Parse context */ + Trigger *pTrigger, /* The trigger step that defines the RETURNING */ + Table *pTab, /* The table to code triggers from */ + int regIn /* The first in an array of registers */ +){ + Vdbe *v = pParse->pVdbe; + ExprList *pNew; + Returning *pReturning; + + assert( v!=0 ); + assert( pParse->bReturning ); + pReturning = pParse->u1.pReturning; + assert( pTrigger == &(pReturning->retTrig) ); + pNew = sqlite3ExpandReturning(pParse, pReturning->pReturnEL, pTab); + if( pNew ){ + NameContext sNC; + memset(&sNC, 0, sizeof(sNC)); + if( pReturning->nRetCol==0 ){ + pReturning->nRetCol = pNew->nExpr; + pReturning->iRetCur = pParse->nTab++; + } + sNC.pParse = pParse; + sNC.uNC.iBaseReg = regIn; + sNC.ncFlags = NC_UBaseReg; + pParse->eTriggerOp = pTrigger->op; + pParse->pTriggerTab = pTab; + if( sqlite3ResolveExprListNames(&sNC, pNew)==SQLITE_OK ){ + int i; + int nCol = pNew->nExpr; + int reg = pParse->nMem+1; + pParse->nMem += nCol+2; + pReturning->iRetReg = reg; + for(i=0; i<nCol; i++){ + sqlite3ExprCodeFactorable(pParse, pNew->a[i].pExpr, reg+i); + } + sqlite3VdbeAddOp3(v, OP_MakeRecord, reg, i, reg+i); + sqlite3VdbeAddOp2(v, OP_NewRowid, pReturning->iRetCur, reg+i+1); + sqlite3VdbeAddOp3(v, OP_Insert, pReturning->iRetCur, reg+i, reg+i+1); + } + sqlite3ExprListDelete(pParse->db, pNew); + pParse->eTriggerOp = 0; + pParse->pTriggerTab = 0; + } +} + + + /* ** Generate VDBE code for the statements inside the body of a single ** trigger. 
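The two routines above carry SQLite's RETURNING clause (added upstream in release 3.35), which this vendored upgrade pulls in. A hedged usage sketch against a hypothetical table:

    CREATE TABLE t1(a INTEGER PRIMARY KEY, b TEXT);

    INSERT INTO t1(b) VALUES('hello') RETURNING a, b;  -- rows come back as from a SELECT
    UPDATE t1 SET b='world' RETURNING *;               -- "*" expands to every visible column
    -- "RETURNING t1.*" raises: RETURNING may not use "TABLE.*" wildcards

Internally the clause is modeled as the special TK_RETURNING trigger handled by sqlite3TriggerList() and sqlite3TriggersExist() above, and codeReturningTrigger() emits the result rows in-line rather than through a trigger subprogram.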
@@ -137744,6 +139509,7 @@ static int codeTriggerProgram( sqlite3ExprDup(db, pStep->pWhere, 0), pParse->eOrconf, 0, 0, 0 ); + sqlite3VdbeAddOp0(v, OP_ResetCount); break; } case TK_INSERT: { @@ -137754,6 +139520,7 @@ static int codeTriggerProgram( pParse->eOrconf, sqlite3UpsertDup(db, pStep->pUpsert) ); + sqlite3VdbeAddOp0(v, OP_ResetCount); break; } case TK_DELETE: { @@ -137761,6 +139528,7 @@ static int codeTriggerProgram( sqlite3TriggerStepSrc(pParse, pStep), sqlite3ExprDup(db, pStep->pWhere, 0), 0, 0 ); + sqlite3VdbeAddOp0(v, OP_ResetCount); break; } default: assert( pStep->op==TK_SELECT ); { @@ -137772,9 +139540,6 @@ static int codeTriggerProgram( break; } } - if( pStep->op!=TK_SELECT ){ - sqlite3VdbeAddOp0(v, OP_ResetCount); - } } return 0; @@ -137921,7 +139686,6 @@ static TriggerPrg *codeRowTrigger( sqlite3VdbeDelete(v); } - assert( !pSubParse->pAinc && !pSubParse->pZombieTab ); assert( !pSubParse->pTriggerPrg && !pSubParse->nMaxArg ); sqlite3ParserReset(pSubParse); sqlite3StackFree(db, pSubParse); @@ -138023,7 +139787,7 @@ SQLITE_PRIVATE void sqlite3CodeRowTriggerDirect( ** ... ... ** reg+N OLD.* value of right-most column of pTab ** reg+N+1 NEW.rowid -** reg+N+2 OLD.* value of left-most column of pTab +** reg+N+2 NEW.* value of left-most column of pTab ** ... ... ** reg+N+N+1 NEW.* value of right-most column of pTab ** @@ -138068,12 +139832,20 @@ SQLITE_PRIVATE void sqlite3CodeRowTrigger( assert( p->pSchema==p->pTabSchema || p->pSchema==pParse->db->aDb[1].pSchema ); - /* Determine whether we should code this trigger */ - if( p->op==op + /* Determine whether we should code this trigger. One of two choices: + ** 1. The trigger is an exact match to the current DML statement + ** 2. This is a RETURNING trigger for INSERT but we are currently + ** doing the UPDATE part of an UPSERT. + */ + if( (p->op==op || (p->bReturning && p->op==TK_INSERT && op==TK_UPDATE)) && p->tr_tm==tr_tm && checkColumnOverlap(p->pColumns, pChanges) ){ - sqlite3CodeRowTriggerDirect(pParse, p, pTab, reg, orconf, ignoreJump); + if( !p->bReturning ){ + sqlite3CodeRowTriggerDirect(pParse, p, pTab, reg, orconf, ignoreJump); + }else if( sqlite3IsToplevel(pParse) ){ + codeReturningTrigger(pParse, p, pTab, reg); + } } } } @@ -138118,13 +139890,18 @@ SQLITE_PRIVATE u32 sqlite3TriggerColmask( assert( isNew==1 || isNew==0 ); for(p=pTrigger; p; p=p->pNext){ - if( p->op==op && (tr_tm&p->tr_tm) + if( p->op==op + && (tr_tm&p->tr_tm) && checkColumnOverlap(p->pColumns,pChanges) ){ - TriggerPrg *pPrg; - pPrg = getRowTrigger(pParse, p, pTab, orconf); - if( pPrg ){ - mask |= pPrg->aColmask[isNew]; + if( p->bReturning ){ + mask = 0xffffffff; + }else{ + TriggerPrg *pPrg; + pPrg = getRowTrigger(pParse, p, pTab, orconf); + if( pPrg ){ + mask |= pPrg->aColmask[isNew]; + } } } } @@ -138781,6 +140558,7 @@ SQLITE_PRIVATE void sqlite3Update( if( (db->flags&SQLITE_CountRows)!=0 && !pParse->pTriggerTab && !pParse->nested + && !pParse->bReturning && pUpsert==0 ){ regRowCount = ++pParse->nMem; @@ -139244,7 +141022,7 @@ SQLITE_PRIVATE void sqlite3Update( ** that information. 
*/ if( regRowCount ){ - sqlite3VdbeAddOp2(v, OP_ResultRow, regRowCount, 1); + sqlite3VdbeAddOp2(v, OP_ChngCntRow, regRowCount, 1); sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows updated", SQLITE_STATIC); } @@ -139479,15 +141257,22 @@ static void updateVirtualTable( /* ** Free a list of Upsert objects */ -SQLITE_PRIVATE void sqlite3UpsertDelete(sqlite3 *db, Upsert *p){ - if( p ){ +static void SQLITE_NOINLINE upsertDelete(sqlite3 *db, Upsert *p){ + do{ + Upsert *pNext = p->pNextUpsert; sqlite3ExprListDelete(db, p->pUpsertTarget); sqlite3ExprDelete(db, p->pUpsertTargetWhere); sqlite3ExprListDelete(db, p->pUpsertSet); sqlite3ExprDelete(db, p->pUpsertWhere); + sqlite3DbFree(db, p->pToFree); sqlite3DbFree(db, p); - } + p = pNext; + }while( p ); } +SQLITE_PRIVATE void sqlite3UpsertDelete(sqlite3 *db, Upsert *p){ + if( p ) upsertDelete(db, p); +} + /* ** Duplicate an Upsert object. @@ -139498,7 +141283,8 @@ SQLITE_PRIVATE Upsert *sqlite3UpsertDup(sqlite3 *db, Upsert *p){ sqlite3ExprListDup(db, p->pUpsertTarget, 0), sqlite3ExprDup(db, p->pUpsertTargetWhere, 0), sqlite3ExprListDup(db, p->pUpsertSet, 0), - sqlite3ExprDup(db, p->pUpsertWhere, 0) + sqlite3ExprDup(db, p->pUpsertWhere, 0), + sqlite3UpsertDup(db, p->pNextUpsert) ); } @@ -139510,22 +141296,25 @@ SQLITE_PRIVATE Upsert *sqlite3UpsertNew( ExprList *pTarget, /* Target argument to ON CONFLICT, or NULL */ Expr *pTargetWhere, /* Optional WHERE clause on the target */ ExprList *pSet, /* UPDATE columns, or NULL for a DO NOTHING */ - Expr *pWhere /* WHERE clause for the ON CONFLICT UPDATE */ + Expr *pWhere, /* WHERE clause for the ON CONFLICT UPDATE */ + Upsert *pNext /* Next ON CONFLICT clause in the list */ ){ Upsert *pNew; - pNew = sqlite3DbMallocRaw(db, sizeof(Upsert)); + pNew = sqlite3DbMallocZero(db, sizeof(Upsert)); if( pNew==0 ){ sqlite3ExprListDelete(db, pTarget); sqlite3ExprDelete(db, pTargetWhere); sqlite3ExprListDelete(db, pSet); sqlite3ExprDelete(db, pWhere); + sqlite3UpsertDelete(db, pNext); return 0; }else{ pNew->pUpsertTarget = pTarget; pNew->pUpsertTargetWhere = pTargetWhere; pNew->pUpsertSet = pSet; pNew->pUpsertWhere = pWhere; - pNew->pUpsertIdx = 0; + pNew->isDoUpdate = pSet!=0; + pNew->pNextUpsert = pNext; } return pNew; } @@ -139550,6 +141339,7 @@ SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget( Expr *pTerm; /* One term of the conflict-target clause */ NameContext sNC; /* Context for resolving symbolic names */ Expr sCol[2]; /* Index column converted into an Expr */ + int nClause = 0; /* Counter of ON CONFLICT clauses */ assert( pTabList->nSrc==1 ); assert( pTabList->a[0].pTab!=0 ); @@ -139563,87 +141353,131 @@ SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget( memset(&sNC, 0, sizeof(sNC)); sNC.pParse = pParse; sNC.pSrcList = pTabList; - rc = sqlite3ResolveExprListNames(&sNC, pUpsert->pUpsertTarget); - if( rc ) return rc; - rc = sqlite3ResolveExprNames(&sNC, pUpsert->pUpsertTargetWhere); - if( rc ) return rc; + for(; pUpsert && pUpsert->pUpsertTarget; + pUpsert=pUpsert->pNextUpsert, nClause++){ + rc = sqlite3ResolveExprListNames(&sNC, pUpsert->pUpsertTarget); + if( rc ) return rc; + rc = sqlite3ResolveExprNames(&sNC, pUpsert->pUpsertTargetWhere); + if( rc ) return rc; - /* Check to see if the conflict target matches the rowid. 
*/ - pTab = pTabList->a[0].pTab; - pTarget = pUpsert->pUpsertTarget; - iCursor = pTabList->a[0].iCursor; - if( HasRowid(pTab) - && pTarget->nExpr==1 - && (pTerm = pTarget->a[0].pExpr)->op==TK_COLUMN - && pTerm->iColumn==XN_ROWID - ){ - /* The conflict-target is the rowid of the primary table */ - assert( pUpsert->pUpsertIdx==0 ); - return SQLITE_OK; - } - - /* Initialize sCol[0..1] to be an expression parse tree for a - ** single column of an index. The sCol[0] node will be the TK_COLLATE - ** operator and sCol[1] will be the TK_COLUMN operator. Code below - ** will populate the specific collation and column number values - ** prior to comparing against the conflict-target expression. - */ - memset(sCol, 0, sizeof(sCol)); - sCol[0].op = TK_COLLATE; - sCol[0].pLeft = &sCol[1]; - sCol[1].op = TK_COLUMN; - sCol[1].iTable = pTabList->a[0].iCursor; - - /* Check for matches against other indexes */ - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - int ii, jj, nn; - if( !IsUniqueIndex(pIdx) ) continue; - if( pTarget->nExpr!=pIdx->nKeyCol ) continue; - if( pIdx->pPartIdxWhere ){ - if( pUpsert->pUpsertTargetWhere==0 ) continue; - if( sqlite3ExprCompare(pParse, pUpsert->pUpsertTargetWhere, - pIdx->pPartIdxWhere, iCursor)!=0 ){ - continue; - } - } - nn = pIdx->nKeyCol; - for(ii=0; ii<nn; ii++){ - Expr *pExpr; - sCol[0].u.zToken = (char*)pIdx->azColl[ii]; - if( pIdx->aiColumn[ii]==XN_EXPR ){ - assert( pIdx->aColExpr!=0 ); - assert( pIdx->aColExpr->nExpr>ii ); - pExpr = pIdx->aColExpr->a[ii].pExpr; - if( pExpr->op!=TK_COLLATE ){ - sCol[0].pLeft = pExpr; - pExpr = &sCol[0]; - } - }else{ - sCol[0].pLeft = &sCol[1]; - sCol[1].iColumn = pIdx->aiColumn[ii]; - pExpr = &sCol[0]; - } - for(jj=0; jj<nn; jj++){ - if( sqlite3ExprCompare(pParse, pTarget->a[jj].pExpr, pExpr,iCursor)<2 ){ - break; /* Column ii of the index matches column jj of target */ - } - } - if( jj>=nn ){ - /* The target contains no match for column jj of the index */ - break; - } - } - if( ii<nn ){ - /* Column ii of the index did not match any term of the conflict target. - ** Continue the search with the next index. */ + /* Check to see if the conflict target matches the rowid. */ + pTab = pTabList->a[0].pTab; + pTarget = pUpsert->pUpsertTarget; + iCursor = pTabList->a[0].iCursor; + if( HasRowid(pTab) + && pTarget->nExpr==1 + && (pTerm = pTarget->a[0].pExpr)->op==TK_COLUMN + && pTerm->iColumn==XN_ROWID + ){ + /* The conflict-target is the rowid of the primary table */ + assert( pUpsert->pUpsertIdx==0 ); continue; } - pUpsert->pUpsertIdx = pIdx; - return SQLITE_OK; + + /* Initialize sCol[0..1] to be an expression parse tree for a + ** single column of an index. The sCol[0] node will be the TK_COLLATE + ** operator and sCol[1] will be the TK_COLUMN operator. Code below + ** will populate the specific collation and column number values + ** prior to comparing against the conflict-target expression. 
+ */ + memset(sCol, 0, sizeof(sCol)); + sCol[0].op = TK_COLLATE; + sCol[0].pLeft = &sCol[1]; + sCol[1].op = TK_COLUMN; + sCol[1].iTable = pTabList->a[0].iCursor; + + /* Check for matches against other indexes */ + for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + int ii, jj, nn; + if( !IsUniqueIndex(pIdx) ) continue; + if( pTarget->nExpr!=pIdx->nKeyCol ) continue; + if( pIdx->pPartIdxWhere ){ + if( pUpsert->pUpsertTargetWhere==0 ) continue; + if( sqlite3ExprCompare(pParse, pUpsert->pUpsertTargetWhere, + pIdx->pPartIdxWhere, iCursor)!=0 ){ + continue; + } + } + nn = pIdx->nKeyCol; + for(ii=0; ii<nn; ii++){ + Expr *pExpr; + sCol[0].u.zToken = (char*)pIdx->azColl[ii]; + if( pIdx->aiColumn[ii]==XN_EXPR ){ + assert( pIdx->aColExpr!=0 ); + assert( pIdx->aColExpr->nExpr>ii ); + pExpr = pIdx->aColExpr->a[ii].pExpr; + if( pExpr->op!=TK_COLLATE ){ + sCol[0].pLeft = pExpr; + pExpr = &sCol[0]; + } + }else{ + sCol[0].pLeft = &sCol[1]; + sCol[1].iColumn = pIdx->aiColumn[ii]; + pExpr = &sCol[0]; + } + for(jj=0; jj<nn; jj++){ + if( sqlite3ExprCompare(pParse,pTarget->a[jj].pExpr,pExpr,iCursor)<2 ){ + break; /* Column ii of the index matches column jj of target */ + } + } + if( jj>=nn ){ + /* The target contains no match for column jj of the index */ + break; + } + } + if( ii<nn ){ + /* Column ii of the index did not match any term of the conflict target. + ** Continue the search with the next index. */ + continue; + } + pUpsert->pUpsertIdx = pIdx; + break; + } + if( pUpsert->pUpsertIdx==0 ){ + char zWhich[16]; + if( nClause==0 && pUpsert->pNextUpsert==0 ){ + zWhich[0] = 0; + }else{ + sqlite3_snprintf(sizeof(zWhich),zWhich,"%r ", nClause+1); + } + sqlite3ErrorMsg(pParse, "%sON CONFLICT clause does not match any " + "PRIMARY KEY or UNIQUE constraint", zWhich); + return SQLITE_ERROR; + } } - sqlite3ErrorMsg(pParse, "ON CONFLICT clause does not match any " - "PRIMARY KEY or UNIQUE constraint"); - return SQLITE_ERROR; + return SQLITE_OK; +} + +/* +** Return true if pUpsert is the last ON CONFLICT clause with a +** conflict target, or if pUpsert is followed by another ON CONFLICT +** clause that targets the INTEGER PRIMARY KEY. +*/ +SQLITE_PRIVATE int sqlite3UpsertNextIsIPK(Upsert *pUpsert){ + Upsert *pNext; + if( NEVER(pUpsert==0) ) return 0; + pNext = pUpsert->pNextUpsert; + if( pNext==0 ) return 1; + if( pNext->pUpsertTarget==0 ) return 1; + if( pNext->pUpsertIdx==0 ) return 1; + return 0; +} + +/* +** Given the list of ON CONFLICT clauses described by pUpsert, and +** a particular index pIdx, return a pointer to the particular ON CONFLICT +** clause that applies to the index. Or, if the index is not subject to +** any ON CONFLICT clause, return NULL. 
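+**
+** A hedged illustration of the multi-clause UPSERT form (added upstream
+** in release 3.35) that this lookup serves, using a hypothetical schema:
+**
+**     CREATE TABLE t1(a INTEGER PRIMARY KEY, b TEXT UNIQUE);
+**     INSERT INTO t1(a,b) VALUES(1,'x')
+**       ON CONFLICT(a) DO UPDATE SET b=excluded.b
+**       ON CONFLICT(b) DO NOTHING;
+**
+** sqlite3UpsertAnalyzeTarget() binds each clause to its constraint via
+** pUpsertIdx; this routine then fetches the clause for the index on
+** which a conflict actually occurred.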
+*/ +SQLITE_PRIVATE Upsert *sqlite3UpsertOfIndex(Upsert *pUpsert, Index *pIdx){ + while( + pUpsert + && pUpsert->pUpsertTarget!=0 + && pUpsert->pUpsertIdx!=pIdx + ){ + pUpsert = pUpsert->pNextUpsert; + } + return pUpsert; } /* @@ -139667,11 +141501,13 @@ SQLITE_PRIVATE void sqlite3UpsertDoUpdate( SrcList *pSrc; /* FROM clause for the UPDATE */ int iDataCur; int i; + Upsert *pTop = pUpsert; assert( v!=0 ); assert( pUpsert!=0 ); - VdbeNoopComment((v, "Begin DO UPDATE of UPSERT")); iDataCur = pUpsert->iDataCur; + pUpsert = sqlite3UpsertOfIndex(pTop, pIdx); + VdbeNoopComment((v, "Begin DO UPDATE of UPSERT")); if( pIdx && iCur!=iDataCur ){ if( HasRowid(pTab) ){ int regRowid = sqlite3GetTempReg(pParse); @@ -139701,19 +141537,17 @@ SQLITE_PRIVATE void sqlite3UpsertDoUpdate( sqlite3VdbeJumpHere(v, i); } } - /* pUpsert does not own pUpsertSrc - the outer INSERT statement does. So - ** we have to make a copy before passing it down into sqlite3Update() */ - pSrc = sqlite3SrcListDup(db, pUpsert->pUpsertSrc, 0); + /* pUpsert does not own pTop->pUpsertSrc - the outer INSERT statement does. + ** So we have to make a copy before passing it down into sqlite3Update() */ + pSrc = sqlite3SrcListDup(db, pTop->pUpsertSrc, 0); /* excluded.* columns of type REAL need to be converted to a hard real */ for(i=0; i<pTab->nCol; i++){ if( pTab->aCol[i].affinity==SQLITE_AFF_REAL ){ - sqlite3VdbeAddOp1(v, OP_RealAffinity, pUpsert->regData+i); + sqlite3VdbeAddOp1(v, OP_RealAffinity, pTop->regData+i); } } - sqlite3Update(pParse, pSrc, pUpsert->pUpsertSet, - pUpsert->pUpsertWhere, OE_Abort, 0, 0, pUpsert); - pUpsert->pUpsertSet = 0; /* Will have been deleted by sqlite3Update() */ - pUpsert->pUpsertWhere = 0; /* Will have been deleted by sqlite3Update() */ + sqlite3Update(pParse, pSrc, sqlite3ExprListDup(db,pUpsert->pUpsertSet,0), + sqlite3ExprDup(db,pUpsert->pUpsertWhere,0), OE_Abort, 0, 0, pUpsert); VdbeNoopComment((v, "End DO UPDATE of UPSERT")); } @@ -140619,7 +142453,7 @@ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){ sqlite3VdbeAddOp0(v, OP_Expire); zWhere = sqlite3MPrintf(db, "name=%Q AND sql=%Q", pTab->zName, zStmt); - sqlite3VdbeAddParseSchemaOp(v, iDb, zWhere); + sqlite3VdbeAddParseSchemaOp(v, iDb, zWhere, 0); sqlite3DbFree(db, zStmt); iReg = ++pParse->nMem; @@ -140790,6 +142624,7 @@ static int vtabCallConstructor( zType[i-1] = '\0'; } pTab->aCol[iCol].colFlags |= COLFLAG_HIDDEN; + pTab->tabFlags |= TF_HasHidden; oooHidden = TF_OOOHidden; }else{ pTab->tabFlags |= oooHidden; @@ -140958,7 +142793,7 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ Table *pNew = sParse.pNewTable; Index *pIdx; pTab->aCol = pNew->aCol; - pTab->nCol = pNew->nCol; + pTab->nNVCol = pTab->nCol = pNew->nCol; pTab->tabFlags |= pNew->tabFlags & (TF_WithoutRowid|TF_NoVisibleRowid); pNew->nCol = 0; pNew->aCol = 0; @@ -141490,19 +143325,6 @@ SQLITE_API int sqlite3_vtab_config(sqlite3 *db, int op, ...){ #ifndef SQLITE_WHEREINT_H #define SQLITE_WHEREINT_H -/* -** Trace output macros -*/ -#if defined(SQLITE_TEST) || defined(SQLITE_DEBUG) -/***/ extern int sqlite3WhereTrace; -#endif -#if defined(SQLITE_DEBUG) \ - && (defined(SQLITE_TEST) || defined(SQLITE_ENABLE_WHERETRACE)) -# define WHERETRACE(K,X) if(sqlite3WhereTrace&(K)) sqlite3DebugPrintf X -# define WHERETRACE_ENABLED 1 -#else -# define WHERETRACE(K,X) -#endif /* Forward references */ @@ -141756,11 +143578,7 @@ struct WhereTerm { #define TERM_ORINFO 0x0010 /* Need to free the WhereTerm.u.pOrInfo object */ #define TERM_ANDINFO 0x0020 /* Need 
to free the WhereTerm.u.pAndInfo obj */ #define TERM_OR_OK 0x0040 /* Used during OR-clause processing */ -#ifdef SQLITE_ENABLE_STAT4 -# define TERM_VNULL 0x0080 /* Manufactured x>NULL or x<=NULL term */ -#else -# define TERM_VNULL 0x0000 /* Disabled if not using stat4 */ -#endif +#define TERM_VNULL 0x0080 /* Manufactured x>NULL or x<=NULL term */ #define TERM_LIKEOPT 0x0100 /* Virtual terms from the LIKE optimization */ #define TERM_LIKECOND 0x0200 /* Conditionally this LIKE operator term */ #define TERM_LIKE 0x0400 /* The original LIKE operator */ @@ -142030,7 +143848,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereExprUsage(WhereMaskSet*, Expr*); SQLITE_PRIVATE Bitmask sqlite3WhereExprUsageNN(WhereMaskSet*, Expr*); SQLITE_PRIVATE Bitmask sqlite3WhereExprListUsage(WhereMaskSet*, ExprList*); SQLITE_PRIVATE void sqlite3WhereExprAnalyze(SrcList*, WhereClause*); -SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, struct SrcList_item*, WhereClause*); +SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, SrcItem*, WhereClause*); @@ -142208,7 +144026,7 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( if( sqlite3ParseToplevel(pParse)->explain==2 ) #endif { - struct SrcList_item *pItem = &pTabList->a[pLevel->iFrom]; + SrcItem *pItem = &pTabList->a[pLevel->iFrom]; Vdbe *v = pParse->pVdbe; /* VM being constructed */ sqlite3 *db = pParse->db; /* Database handle */ int isSearch; /* True for a SEARCH. False for SCAN. */ @@ -143001,7 +144819,7 @@ static int codeCursorHintFixExpr(Walker *pWalker, Expr *pExpr){ ** Insert an OP_CursorHint instruction if it is appropriate to do so. */ static void codeCursorHint( - struct SrcList_item *pTabItem, /* FROM clause item */ + SrcItem *pTabItem, /* FROM clause item */ WhereInfo *pWInfo, /* The where clause */ WhereLevel *pLevel, /* Which loop to provide hints for */ WhereTerm *pEndRange /* Hint this end-of-scan boundary term if not NULL */ @@ -143376,7 +145194,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( WhereClause *pWC; /* Decomposition of the entire WHERE clause */ WhereTerm *pTerm; /* A WHERE clause term */ sqlite3 *db; /* Database connection */ - struct SrcList_item *pTabItem; /* FROM clause term being coded */ + SrcItem *pTabItem; /* FROM clause term being coded */ int addrBrk; /* Jump here to break out of the loop */ int addrHalt; /* addrBrk for the outermost loop */ int addrCont; /* Jump here to continue with next cycle */ @@ -143822,6 +145640,12 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( SWAP(u8, nBtm, nTop); } + if( iLevel>0 && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)!=0 ){ + /* In case OP_SeekScan is used, ensure that the index cursor does not + ** point to a valid row for the first iteration of this loop. */ + sqlite3VdbeAddOp1(v, OP_NullRow, iIdxCur); + } + /* Generate code to evaluate all constraint terms using == or IN ** and store the values of those terms in an array of registers ** starting at regBase. @@ -144158,7 +145982,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( */ if( pWInfo->nLevel>1 ){ int nNotReady; /* The number of notReady tables */ - struct SrcList_item *origSrc; /* Original list of tables */ + SrcItem *origSrc; /* Original list of tables */ nNotReady = pWInfo->nLevel - iLevel - 1; pOrTab = sqlite3StackAllocRaw(db, sizeof(*pOrTab)+ nNotReady*sizeof(pOrTab->a[0])); @@ -145577,6 +147401,277 @@ static int exprMightBeIndexed( return exprMightBeIndexed2(pFrom,mPrereq,aiCurCol,pExpr); } +/* +** Expression callback for exprUsesSrclist(). 
+*/ +static int exprUsesSrclistCb(Walker *p, Expr *pExpr){ + if( pExpr->op==TK_COLUMN ){ + SrcList *pSrc = p->u.pSrcList; + int iCsr = pExpr->iTable; + int ii; + for(ii=0; ii<pSrc->nSrc; ii++){ + if( pSrc->a[ii].iCursor==iCsr ){ + return p->eCode ? WRC_Abort : WRC_Continue; + } + } + return p->eCode ? WRC_Continue : WRC_Abort; + } + return WRC_Continue; +} + +/* +** Select callback for exprUsesSrclist(). +*/ +static int exprUsesSrclistSelectCb(Walker *NotUsed1, Select *NotUsed2){ + UNUSED_PARAMETER(NotUsed1); + UNUSED_PARAMETER(NotUsed2); + return WRC_Abort; +} + +/* +** This function always returns true if expression pExpr contains +** a sub-select. +** +** If there is no sub-select in pExpr, then return true if pExpr +** contains a TK_COLUMN node for a table that is (bUses==1) +** or is not (bUses==0) in pSrc. +** +** Said another way: +** +** bUses Return Meaning +** -------- ------ ------------------------------------------------ +** +** bUses==1 true pExpr contains either a sub-select or a +** TK_COLUMN referencing pSrc. +** +** bUses==1 false pExpr contains no sub-selects and all TK_COLUMN +** nodes reference tables not found in pSrc +** +** bUses==0 true pExpr contains either a sub-select or a TK_COLUMN +** that references a table not in pSrc. +** +** bUses==0 false pExpr contains no sub-selects and all TK_COLUMN +** nodes reference pSrc +*/ +static int exprUsesSrclist(SrcList *pSrc, Expr *pExpr, int bUses){ + Walker sWalker; + memset(&sWalker, 0, sizeof(Walker)); + sWalker.eCode = bUses; + sWalker.u.pSrcList = pSrc; + sWalker.xExprCallback = exprUsesSrclistCb; + sWalker.xSelectCallback = exprUsesSrclistSelectCb; + return (sqlite3WalkExpr(&sWalker, pExpr)==WRC_Abort); +} + +/* +** Context object used by exprExistsToInIter() as it iterates through an +** expression tree. +*/ +struct ExistsToInCtx { + SrcList *pSrc; /* The tables in an EXISTS(SELECT ... FROM <here> ...) */ + Expr *pInLhs; /* OUT: Use this as the LHS of the IN operator */ + Expr *pEq; /* OUT: The == term that include pInLhs */ + Expr **ppAnd; /* OUT: The AND operator that includes pEq as a child */ + Expr **ppParent; /* The AND operator currently being examined */ +}; + +/* +** Iterate through all AND connected nodes in the expression tree +** headed by (*ppExpr), populating the structure passed as the first +** argument with the values required by exprAnalyzeExistsFindEq(). +** +** This function returns non-zero if the expression tree does not meet +** the two conditions described by the header comment for +** exprAnalyzeExistsFindEq(), or zero if it does. +*/ +static int exprExistsToInIter(struct ExistsToInCtx *p, Expr **ppExpr){ + Expr *pExpr = *ppExpr; + switch( pExpr->op ){ + case TK_AND: + p->ppParent = ppExpr; + if( exprExistsToInIter(p, &pExpr->pLeft) ) return 1; + p->ppParent = ppExpr; + if( exprExistsToInIter(p, &pExpr->pRight) ) return 1; + break; + case TK_EQ: { + int bLeft = exprUsesSrclist(p->pSrc, pExpr->pLeft, 0); + int bRight = exprUsesSrclist(p->pSrc, pExpr->pRight, 0); + if( bLeft || bRight ){ + if( (bLeft && bRight) || p->pInLhs ) return 1; + p->pInLhs = bLeft ? pExpr->pLeft : pExpr->pRight; + if( exprUsesSrclist(p->pSrc, p->pInLhs, 1) ) return 1; + p->pEq = pExpr; + p->ppAnd = p->ppParent; + } + break; + } + default: + if( exprUsesSrclist(p->pSrc, pExpr, 0) ){ + return 1; + } + break; + } + + return 0; +} + +/* +** This function is used by exprAnalyzeExists() when creating virtual IN(...) +** terms equivalent to user-supplied EXIST(...) clauses. 
It splits the WHERE +** clause of the Select object passed as the first argument into one or more +** expressions joined by AND operators, and then tests if the following are +** true: +** +** 1. Exactly one of the AND separated terms refers to the outer +** query, and it is an == (TK_EQ) expression. +** +** 2. Only one side of the == expression refers to the outer query, and +** it does not refer to any columns from the inner query. +** +** If both these conditions are true, then a pointer to the side of the == +** expression that refers to the outer query is returned. The caller will +** use this expression as the LHS of the IN(...) virtual term. Or, if one +** or both of the above conditions are not true, NULL is returned. +** +** If non-NULL is returned and ppEq is non-NULL, *ppEq is set to point +** to the == expression node before returning. If pppAnd is non-NULL and +** the == node is not the root of the WHERE clause, then *pppAnd is set +** to point to the pointer to the AND node that is the parent of the == +** node within the WHERE expression tree. +*/ +static Expr *exprAnalyzeExistsFindEq( + Select *pSel, /* The SELECT of the EXISTS */ + Expr **ppEq, /* OUT: == node from WHERE clause */ + Expr ***pppAnd /* OUT: Pointer to parent of ==, if any */ +){ + struct ExistsToInCtx ctx; + memset(&ctx, 0, sizeof(ctx)); + ctx.pSrc = pSel->pSrc; + if( exprExistsToInIter(&ctx, &pSel->pWhere) ){ + return 0; + } + if( ppEq ) *ppEq = ctx.pEq; + if( pppAnd ) *pppAnd = ctx.ppAnd; + return ctx.pInLhs; +} + +/* +** Term idxTerm of the WHERE clause passed as the second argument is an +** EXISTS expression with a correlated SELECT statement on the RHS. +** This function analyzes the SELECT statement, and if possible adds an +** equivalent "? IN(SELECT...)" virtual term to the WHERE clause. +** +** For an EXISTS term such as the following: +** +** EXISTS (SELECT ... FROM <srclist> WHERE <e1> = <e2> AND <e3>) +** +** The virtual IN() term added is: +** +** <e1> IN (SELECT <e2> FROM <srclist> WHERE <e3>) +** +** The virtual term is only added if the following conditions are met: +** +** 1. The sub-select must not be an aggregate or use window functions, +** +** 2. The sub-select must not be a compound SELECT, +** +** 3. Expression <e1> must refer to at least one column from the outer +** query, and must not refer to any column from the inner query +** (i.e. from <srclist>). +** +** 4. <e2> and <e3> must not refer to any values from the outer query. +** In other words, once <e1> has been removed, the inner query +** must not be correlated. 
+** +*/ +static void exprAnalyzeExists( + SrcList *pSrc, /* the FROM clause */ + WhereClause *pWC, /* the WHERE clause */ + int idxTerm /* Index of the term to be analyzed */ +){ + Parse *pParse = pWC->pWInfo->pParse; + WhereTerm *pTerm = &pWC->a[idxTerm]; + Expr *pExpr = pTerm->pExpr; + Select *pSel = pExpr->x.pSelect; + Expr *pDup = 0; + Expr *pEq = 0; + Expr *pRet = 0; + Expr *pInLhs = 0; + Expr **ppAnd = 0; + int idxNew; + sqlite3 *db = pParse->db; + + assert( pExpr->op==TK_EXISTS ); + assert( (pExpr->flags & EP_VarSelect) && (pExpr->flags & EP_xIsSelect) ); + + if( pSel->selFlags & SF_Aggregate ) return; +#ifndef SQLITE_OMIT_WINDOWFUNC + if( pSel->pWin ) return; +#endif + if( pSel->pPrior ) return; + if( pSel->pWhere==0 ) return; + if( pSel->pLimit ) return; + if( 0==exprAnalyzeExistsFindEq(pSel, 0, 0) ) return; + + pDup = sqlite3ExprDup(db, pExpr, 0); + if( db->mallocFailed ){ + sqlite3ExprDelete(db, pDup); + return; + } + pSel = pDup->x.pSelect; + sqlite3ExprListDelete(db, pSel->pEList); + pSel->pEList = 0; + + pInLhs = exprAnalyzeExistsFindEq(pSel, &pEq, &ppAnd); + assert( pInLhs && pEq ); + assert( pEq==pSel->pWhere || ppAnd ); + if( pInLhs==pEq->pLeft ){ + pRet = pEq->pRight; + }else{ + CollSeq *p = sqlite3ExprCompareCollSeq(pParse, pEq); + pInLhs = sqlite3ExprAddCollateString(pParse, pInLhs, p?p->zName:"BINARY"); + pRet = pEq->pLeft; + } + + assert( pDup->pLeft==0 ); + pDup->op = TK_IN; + pDup->pLeft = pInLhs; + pDup->flags &= ~EP_VarSelect; + if( pRet->op==TK_VECTOR ){ + pSel->pEList = pRet->x.pList; + pRet->x.pList = 0; + sqlite3ExprDelete(db, pRet); + }else{ + pSel->pEList = sqlite3ExprListAppend(pParse, 0, pRet); + } + pEq->pLeft = 0; + pEq->pRight = 0; + if( ppAnd ){ + Expr *pAnd = *ppAnd; + Expr *pOther = (pAnd->pLeft==pEq) ? pAnd->pRight : pAnd->pLeft; + pAnd->pLeft = pAnd->pRight = 0; + sqlite3ExprDelete(db, pAnd); + *ppAnd = pOther; + }else{ + assert( pSel->pWhere==pEq ); + pSel->pWhere = 0; + } + sqlite3ExprDelete(db, pEq); + +#ifdef WHERETRACE_ENABLED /* 0x20 */ + if( sqlite3WhereTrace & 0x20 ){ + sqlite3DebugPrintf("Convert EXISTS:\n"); + sqlite3TreeViewExpr(0, pExpr, 0); + sqlite3DebugPrintf("into IN:\n"); + sqlite3TreeViewExpr(0, pDup, 0); + } +#endif + idxNew = whereClauseInsert(pWC, pDup, TERM_VIRTUAL|TERM_DYNAMIC); + exprAnalyze(pSrc, pWC, idxNew); + markTermAsChild(pWC, idxNew, idxTerm); + pWC->a[idxTerm].wtFlags |= TERM_COPIED; +} + /* ** The input to this routine is an WhereTerm structure with only the ** "pExpr" field filled in. The job of this routine is to analyze the @@ -145710,6 +147805,12 @@ static void exprAnalyze( pNew->prereqRight = prereqLeft | extraRight; pNew->prereqAll = prereqAll; pNew->eOperator = (operatorMask(pDup->op) + eExtraOp) & opMask; + }else if( op==TK_ISNULL && 0==sqlite3ExprCanBeNull(pLeft) ){ + pExpr->op = TK_TRUEFALSE; + pExpr->u.zToken = "false"; + ExprSetProperty(pExpr, EP_IsFalse); + pTerm->prereqAll = 0; + pTerm->eOperator = 0; } } @@ -145762,6 +147863,52 @@ static void exprAnalyze( } #endif /* SQLITE_OMIT_OR_OPTIMIZATION */ + else if( pExpr->op==TK_EXISTS ){ + /* Perhaps treat an EXISTS operator as an IN operator */ + if( (pExpr->flags & EP_VarSelect)!=0 + && OptimizationEnabled(db, SQLITE_ExistsToIN) + ){ + exprAnalyzeExists(pSrc, pWC, idxTerm); + } + } + + /* The form "x IS NOT NULL" can sometimes be evaluated more efficiently + ** as "x>NULL" if x is not an INTEGER PRIMARY KEY. So construct a + ** virtual term of that form. + ** + ** The virtual term must be tagged with TERM_VNULL. 
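+  **
+  ** For example, a hypothetical term "t1.b IS NOT NULL" gives rise to
+  ** the virtual term "t1.b>NULL", which the planner can treat like any
+  ** other range constraint when considering an index on t1(b).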
+ */ + else if( pExpr->op==TK_NOTNULL ){ + if( pExpr->pLeft->op==TK_COLUMN + && pExpr->pLeft->iColumn>=0 + && !ExprHasProperty(pExpr, EP_FromJoin) + ){ + Expr *pNewExpr; + Expr *pLeft = pExpr->pLeft; + int idxNew; + WhereTerm *pNewTerm; + + pNewExpr = sqlite3PExpr(pParse, TK_GT, + sqlite3ExprDup(db, pLeft, 0), + sqlite3ExprAlloc(db, TK_NULL, 0, 0)); + + idxNew = whereClauseInsert(pWC, pNewExpr, + TERM_VIRTUAL|TERM_DYNAMIC|TERM_VNULL); + if( idxNew ){ + pNewTerm = &pWC->a[idxNew]; + pNewTerm->prereqRight = 0; + pNewTerm->leftCursor = pLeft->iTable; + pNewTerm->u.x.leftColumn = pLeft->iColumn; + pNewTerm->eOperator = WO_GT; + markTermAsChild(pWC, idxNew, idxTerm); + pTerm = &pWC->a[idxTerm]; + pTerm->wtFlags |= TERM_COPIED; + pNewTerm->prereqAll = pTerm->prereqAll; + } + } + } + + #ifndef SQLITE_OMIT_LIKE_OPTIMIZATION /* Add constraints to reduce the search space on a LIKE or GLOB ** operator. @@ -145776,7 +147923,8 @@ static void exprAnalyze( ** bound is made all lowercase so that the bounds also work when comparing ** BLOBs. */ - if( pWC->op==TK_AND + else if( pExpr->op==TK_FUNCTION + && pWC->op==TK_AND && isLikeOrGlob(pParse, pExpr, &pStr1, &isComplete, &noCase) ){ Expr *pLeft; /* LHS of LIKE/GLOB operator */ @@ -145846,6 +147994,65 @@ static void exprAnalyze( } #endif /* SQLITE_OMIT_LIKE_OPTIMIZATION */ + /* If there is a vector == or IS term - e.g. "(a, b) == (?, ?)" - create + ** new terms for each component comparison - "a = ?" and "b = ?". The + ** new terms completely replace the original vector comparison, which is + ** no longer used. + ** + ** This is only required if at least one side of the comparison operation + ** is not a sub-select. */ + if( (pExpr->op==TK_EQ || pExpr->op==TK_IS) + && (nLeft = sqlite3ExprVectorSize(pExpr->pLeft))>1 + && sqlite3ExprVectorSize(pExpr->pRight)==nLeft + && ( (pExpr->pLeft->flags & EP_xIsSelect)==0 + || (pExpr->pRight->flags & EP_xIsSelect)==0) + && pWC->op==TK_AND + ){ + int i; + for(i=0; i<nLeft; i++){ + int idxNew; + Expr *pNew; + Expr *pLeft = sqlite3ExprForVectorField(pParse, pExpr->pLeft, i); + Expr *pRight = sqlite3ExprForVectorField(pParse, pExpr->pRight, i); + + pNew = sqlite3PExpr(pParse, pExpr->op, pLeft, pRight); + transferJoinMarkings(pNew, pExpr); + idxNew = whereClauseInsert(pWC, pNew, TERM_DYNAMIC); + exprAnalyze(pSrc, pWC, idxNew); + } + pTerm = &pWC->a[idxTerm]; + pTerm->wtFlags |= TERM_CODED|TERM_VIRTUAL; /* Disable the original */ + pTerm->eOperator = 0; + } + + /* If there is a vector IN term - e.g. "(a, b) IN (SELECT ...)" - create + ** a virtual term for each vector component. The expression object + ** used by each such virtual term is pExpr (the full vector IN(...) + ** expression). The WhereTerm.u.x.iField variable identifies the index within + ** the vector on the LHS that the virtual term represents. + ** + ** This only works if the RHS is a simple SELECT (not a compound) that does + ** not use window functions. 
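+  **
+  ** For example, a hypothetical term "(a,b) IN (SELECT x,y FROM t2)"
+  ** gives rise to two virtual terms that share that same expression
+  ** object, one with u.x.iField==1 for "a" and one with u.x.iField==2
+  ** for "b".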
+ */ + else if( pExpr->op==TK_IN + && pTerm->u.x.iField==0 + && pExpr->pLeft->op==TK_VECTOR + && pExpr->x.pSelect->pPrior==0 +#ifndef SQLITE_OMIT_WINDOWFUNC + && pExpr->x.pSelect->pWin==0 +#endif + && pWC->op==TK_AND + ){ + int i; + for(i=0; i<sqlite3ExprVectorSize(pExpr->pLeft); i++){ + int idxNew; + idxNew = whereClauseInsert(pWC, pExpr, TERM_VIRTUAL); + pWC->a[idxNew].u.x.iField = i+1; + exprAnalyze(pSrc, pWC, idxNew); + markTermAsChild(pWC, idxNew, idxTerm); + } + } + #ifndef SQLITE_OMIT_VIRTUALTABLE /* Add a WO_AUX auxiliary term to the constraint set if the ** current expression is of the form "column OP expr" where OP @@ -145856,7 +148063,7 @@ static void exprAnalyze( ** virtual tables. The native query optimizer does not attempt ** to do anything with MATCH functions. */ - if( pWC->op==TK_AND ){ + else if( pWC->op==TK_AND ){ Expr *pRight = 0, *pLeft = 0; int res = isAuxiliaryVtabOperator(db, pExpr, &eOp2, &pLeft, &pRight); while( res-- > 0 ){ @@ -145892,102 +148099,6 @@ static void exprAnalyze( } #endif /* SQLITE_OMIT_VIRTUALTABLE */ - /* If there is a vector == or IS term - e.g. "(a, b) == (?, ?)" - create - ** new terms for each component comparison - "a = ?" and "b = ?". The - ** new terms completely replace the original vector comparison, which is - ** no longer used. - ** - ** This is only required if at least one side of the comparison operation - ** is not a sub-select. */ - if( pWC->op==TK_AND - && (pExpr->op==TK_EQ || pExpr->op==TK_IS) - && (nLeft = sqlite3ExprVectorSize(pExpr->pLeft))>1 - && sqlite3ExprVectorSize(pExpr->pRight)==nLeft - && ( (pExpr->pLeft->flags & EP_xIsSelect)==0 - || (pExpr->pRight->flags & EP_xIsSelect)==0) - ){ - int i; - for(i=0; i<nLeft; i++){ - int idxNew; - Expr *pNew; - Expr *pLeft = sqlite3ExprForVectorField(pParse, pExpr->pLeft, i); - Expr *pRight = sqlite3ExprForVectorField(pParse, pExpr->pRight, i); - - pNew = sqlite3PExpr(pParse, pExpr->op, pLeft, pRight); - transferJoinMarkings(pNew, pExpr); - idxNew = whereClauseInsert(pWC, pNew, TERM_DYNAMIC); - exprAnalyze(pSrc, pWC, idxNew); - } - pTerm = &pWC->a[idxTerm]; - pTerm->wtFlags |= TERM_CODED|TERM_VIRTUAL; /* Disable the original */ - pTerm->eOperator = 0; - } - - /* If there is a vector IN term - e.g. "(a, b) IN (SELECT ...)" - create - ** a virtual term for each vector component. The expression object - ** used by each such virtual term is pExpr (the full vector IN(...) - ** expression). The WhereTerm.u.x.iField variable identifies the index within - ** the vector on the LHS that the virtual term represents. - ** - ** This only works if the RHS is a simple SELECT (not a compound) that does - ** not use window functions. - */ - if( pWC->op==TK_AND && pExpr->op==TK_IN && pTerm->u.x.iField==0 - && pExpr->pLeft->op==TK_VECTOR - && pExpr->x.pSelect->pPrior==0 -#ifndef SQLITE_OMIT_WINDOWFUNC - && pExpr->x.pSelect->pWin==0 -#endif - ){ - int i; - for(i=0; i<sqlite3ExprVectorSize(pExpr->pLeft); i++){ - int idxNew; - idxNew = whereClauseInsert(pWC, pExpr, TERM_VIRTUAL); - pWC->a[idxNew].u.x.iField = i+1; - exprAnalyze(pSrc, pWC, idxNew); - markTermAsChild(pWC, idxNew, idxTerm); - } - } - -#ifdef SQLITE_ENABLE_STAT4 - /* When sqlite_stat4 histogram data is available an operator of the - ** form "x IS NOT NULL" can sometimes be evaluated more efficiently - ** as "x>NULL" if x is not an INTEGER PRIMARY KEY. So construct a - ** virtual term of that form. - ** - ** Note that the virtual term must be tagged with TERM_VNULL. 
-  */
-  if( pExpr->op==TK_NOTNULL
-   && pExpr->pLeft->op==TK_COLUMN
-   && pExpr->pLeft->iColumn>=0
-   && !ExprHasProperty(pExpr, EP_FromJoin)
-   && OptimizationEnabled(db, SQLITE_Stat4)
-  ){
-    Expr *pNewExpr;
-    Expr *pLeft = pExpr->pLeft;
-    int idxNew;
-    WhereTerm *pNewTerm;
-
-    pNewExpr = sqlite3PExpr(pParse, TK_GT,
-                            sqlite3ExprDup(db, pLeft, 0),
-                            sqlite3ExprAlloc(db, TK_NULL, 0, 0));
-
-    idxNew = whereClauseInsert(pWC, pNewExpr,
-                              TERM_VIRTUAL|TERM_DYNAMIC|TERM_VNULL);
-    if( idxNew ){
-      pNewTerm = &pWC->a[idxNew];
-      pNewTerm->prereqRight = 0;
-      pNewTerm->leftCursor = pLeft->iTable;
-      pNewTerm->u.x.leftColumn = pLeft->iColumn;
-      pNewTerm->eOperator = WO_GT;
-      markTermAsChild(pWC, idxNew, idxTerm);
-      pTerm = &pWC->a[idxTerm];
-      pTerm->wtFlags |= TERM_COPIED;
-      pNewTerm->prereqAll = pTerm->prereqAll;
-    }
-  }
-#endif /* SQLITE_ENABLE_STAT4 */
-
   /* Prevent ON clause terms of a LEFT JOIN from being used to drive
   ** an index for tables to the left of the join.
   */
@@ -146146,7 +148257,7 @@ SQLITE_PRIVATE void sqlite3WhereExprAnalyze(
 */
 SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(
   Parse *pParse,                    /* Parsing context */
-  struct SrcList_item *pItem,       /* The FROM clause term to process */
+  SrcItem *pItem,                   /* The FROM clause term to process */
   WhereClause *pWC                  /* Xfer function arguments to here */
 ){
   Table *pTab;
@@ -146223,12 +148334,6 @@ struct HiddenIndexInfo {
 /* Forward declaration of methods */
 static int whereLoopResize(sqlite3*, WhereLoop*, int);
 
-/* Test variable that can be set to enable WHERE tracing */
-#if defined(SQLITE_TEST) || defined(SQLITE_DEBUG)
-/***/ int sqlite3WhereTrace = 0;
-#endif
-
-
 /*
 ** Return the estimated number of output rows from a WHERE clause
 */
@@ -146291,6 +148396,32 @@ SQLITE_PRIVATE int sqlite3WhereOrderByLimitOptLabel(WhereInfo *pWInfo){
   return pInner->addrNxt;
 }
 
+/*
+** While generating code for the min/max optimization, after handling
+** the aggregate-step call to min() or max(), check to see if any
+** additional looping is required. If the output order is such that
+** we are certain that the correct answer has already been found, then
+** code an OP_Goto to bypass subsequent processing.
+**
+** Any extra OP_Goto that is coded here is an optimization. The
+** correct answer should be obtained regardless. This OP_Goto just
+** makes the answer appear faster.
+*/
+SQLITE_PRIVATE void sqlite3WhereMinMaxOptEarlyOut(Vdbe *v, WhereInfo *pWInfo){
+  WhereLevel *pInner;
+  int i;
+  if( !pWInfo->bOrderedInnerLoop ) return;
+  if( pWInfo->nOBSat==0 ) return;
+  for(i=pWInfo->nLevel-1; i>=0; i--){
+    pInner = &pWInfo->a[i];
+    if( (pInner->pWLoop->wsFlags & WHERE_COLUMN_IN)!=0 ){
+      sqlite3VdbeGoto(v, pInner->addrNxt);
+      return;
+    }
+  }
+  sqlite3VdbeGoto(v, pWInfo->iBreak);
+}
+
 /*
 ** Return the VDBE address or label to jump to in order to continue
 ** immediately with the next row of a WHERE clause.
@@ -146860,7 +148991,7 @@ static void whereTraceIndexInfoOutputs(sqlite3_index_info *p){ */ static int termCanDriveIndex( WhereTerm *pTerm, /* WHERE clause term to check */ - struct SrcList_item *pSrc, /* Table we are trying to access */ + SrcItem *pSrc, /* Table we are trying to access */ Bitmask notReady /* Tables in outer loops of the join */ ){ char aff; @@ -146894,7 +149025,7 @@ static int termCanDriveIndex( static void constructAutomaticIndex( Parse *pParse, /* The parsing context */ WhereClause *pWC, /* The WHERE clause */ - struct SrcList_item *pSrc, /* The FROM clause term to get the next index */ + SrcItem *pSrc, /* The FROM clause term to get the next index */ Bitmask notReady, /* Mask of cursors that are not available */ WhereLevel *pLevel /* Write new index here */ ){ @@ -146918,7 +149049,7 @@ static void constructAutomaticIndex( u8 sentWarning = 0; /* True if a warnning has been issued */ Expr *pPartial = 0; /* Partial Index Expression */ int iContinue = 0; /* Jump here to skip excluded rows */ - struct SrcList_item *pTabItem; /* FROM clause term being indexed */ + SrcItem *pTabItem; /* FROM clause term being indexed */ int addrCounter = 0; /* Address where integer counter is initialized */ int regBase; /* Array of registers where record is assembled */ @@ -147102,7 +149233,7 @@ static sqlite3_index_info *allocateIndexInfo( Parse *pParse, /* The parsing context */ WhereClause *pWC, /* The WHERE clause being analyzed */ Bitmask mUnusable, /* Ignore terms with these prereqs */ - struct SrcList_item *pSrc, /* The FROM clause term that is the vtab */ + SrcItem *pSrc, /* The FROM clause term that is the vtab */ ExprList *pOrderBy, /* The ORDER BY clause */ u16 *pmNoOmit /* Mask of terms not to omit */ ){ @@ -148000,7 +150131,7 @@ SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC){ SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC){ WhereInfo *pWInfo = pWC->pWInfo; int nb = 1+(pWInfo->pTabList->nSrc+3)/4; - struct SrcList_item *pItem = pWInfo->pTabList->a + p->iTab; + SrcItem *pItem = pWInfo->pTabList->a + p->iTab; Table *pTab = pItem->pTab; Bitmask mAll = (((Bitmask)1)<<(nb*4)) - 1; sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId, @@ -148611,7 +150742,7 @@ static int whereRangeVectorLen( */ static int whereLoopAddBtreeIndex( WhereLoopBuilder *pBuilder, /* The WhereLoop factory */ - struct SrcList_item *pSrc, /* FROM clause term being analyzed */ + SrcItem *pSrc, /* FROM clause term being analyzed */ Index *pProbe, /* An index on pSrc */ LogEst nInMul /* log(Number of iterations due to IN) */ ){ @@ -148797,7 +150928,7 @@ static int whereLoopAddBtreeIndex( pBtm = pTerm; pTop = 0; if( pTerm->wtFlags & TERM_LIKEOPT ){ - /* Range contraints that come from the LIKE optimization are + /* Range constraints that come from the LIKE optimization are ** always used in pairs. 
*/ pTop = &pTerm[1]; assert( (pTop-(pTerm->pWC->a))<pTerm->pWC->nTerm ); @@ -149102,7 +151233,7 @@ static int whereLoopAddBtree( LogEst aiRowEstPk[2]; /* The aiRowLogEst[] value for the sPk index */ i16 aiColumnPk = -1; /* The aColumn[] value for the sPk index */ SrcList *pTabList; /* The FROM clause */ - struct SrcList_item *pSrc; /* The FROM clause btree term to add */ + SrcItem *pSrc; /* The FROM clause btree term to add */ WhereLoop *pNew; /* Template WhereLoop object */ int rc = SQLITE_OK; /* Return code */ int iSortIdx = 1; /* Index number */ @@ -149120,9 +151251,9 @@ static int whereLoopAddBtree( pWC = pBuilder->pWC; assert( !IsVirtual(pSrc->pTab) ); - if( pSrc->pIBIndex ){ + if( pSrc->fg.isIndexedBy ){ /* An INDEXED BY clause specifies a particular index to use */ - pProbe = pSrc->pIBIndex; + pProbe = pSrc->u2.pIBIndex; }else if( !HasRowid(pTab) ){ pProbe = pTab->pIndex; }else{ @@ -149158,7 +151289,7 @@ static int whereLoopAddBtree( if( !pBuilder->pOrSet /* Not part of an OR optimization */ && (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0 && (pWInfo->pParse->db->flags & SQLITE_AutoIndex)!=0 - && pSrc->pIBIndex==0 /* Has no INDEXED BY clause */ + && !pSrc->fg.isIndexedBy /* Has no INDEXED BY clause */ && !pSrc->fg.notIndexed /* Has no NOT INDEXED clause */ && HasRowid(pTab) /* Not WITHOUT ROWID table. (FIXME: Why not?) */ && !pSrc->fg.isCorrelated /* Not a correlated subquery */ @@ -149208,7 +151339,7 @@ static int whereLoopAddBtree( /* Loop over all indices. If there was an INDEXED BY clause, then only ** consider index pProbe. */ for(; rc==SQLITE_OK && pProbe; - pProbe=(pSrc->pIBIndex ? 0 : pProbe->pNext), iSortIdx++ + pProbe=(pSrc->fg.isIndexedBy ? 0 : pProbe->pNext), iSortIdx++ ){ int isLeft = (pSrc->fg.jointype & JT_OUTER)!=0; if( pProbe->pPartIdxWhere!=0 @@ -149383,7 +151514,7 @@ static int whereLoopAddVirtualOne( int rc = SQLITE_OK; WhereLoop *pNew = pBuilder->pNew; Parse *pParse = pBuilder->pWInfo->pParse; - struct SrcList_item *pSrc = &pBuilder->pWInfo->pTabList->a[pNew->iTab]; + SrcItem *pSrc = &pBuilder->pWInfo->pTabList->a[pNew->iTab]; int nConstraint = pIdxInfo->nConstraint; assert( (mUsable & mPrereq)==mPrereq ); @@ -149575,7 +151706,7 @@ static int whereLoopAddVirtual( WhereInfo *pWInfo; /* WHERE analysis context */ Parse *pParse; /* The parsing context */ WhereClause *pWC; /* The WHERE clause */ - struct SrcList_item *pSrc; /* The FROM clause term to search */ + SrcItem *pSrc; /* The FROM clause term to search */ sqlite3_index_info *p; /* Object to pass to xBestIndex() */ int nConstraint; /* Number of constraints in p */ int bIn; /* True if plan uses IN(...) 
operator */ @@ -149703,7 +151834,7 @@ static int whereLoopAddOr( WhereClause tempWC; WhereLoopBuilder sSubBuild; WhereOrSet sSum, sCur; - struct SrcList_item *pItem; + SrcItem *pItem; pWC = pBuilder->pWC; pWCEnd = pWC->a + pWC->nTerm; @@ -149819,8 +151950,8 @@ static int whereLoopAddAll(WhereLoopBuilder *pBuilder){ Bitmask mPrior = 0; int iTab; SrcList *pTabList = pWInfo->pTabList; - struct SrcList_item *pItem; - struct SrcList_item *pEnd = &pTabList->a[pWInfo->nLevel]; + SrcItem *pItem; + SrcItem *pEnd = &pTabList->a[pWInfo->nLevel]; sqlite3 *db = pWInfo->pParse->db; int rc = SQLITE_OK; WhereLoop *pNew; @@ -149843,7 +151974,7 @@ static int whereLoopAddAll(WhereLoopBuilder *pBuilder){ } #ifndef SQLITE_OMIT_VIRTUALTABLE if( IsVirtual(pItem->pTab) ){ - struct SrcList_item *p; + SrcItem *p; for(p=&pItem[1]; p<pEnd; p++){ if( mUnusable || (p->fg.jointype & (JT_LEFT|JT_CROSS)) ){ mUnusable |= sqlite3WhereGetMask(&pWInfo->sMaskSet, p->iCursor); @@ -150698,7 +152829,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ */ static int whereShortCut(WhereLoopBuilder *pBuilder){ WhereInfo *pWInfo; - struct SrcList_item *pItem; + SrcItem *pItem; WhereClause *pWC; WhereTerm *pTerm; WhereLoop *pLoop; @@ -151157,7 +153288,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( if( pWInfo->pOrderBy==0 && (db->flags & SQLITE_ReverseOrder)!=0 ){ pWInfo->revMask = ALLBITS; } - if( pParse->nErr || NEVER(db->mallocFailed) ){ + if( pParse->nErr || db->mallocFailed ){ goto whereBeginError; } #ifdef WHERETRACE_ENABLED @@ -151228,7 +153359,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( } for(i=pWInfo->nLevel-1; i>=1; i--){ WhereTerm *pTerm, *pEnd; - struct SrcList_item *pItem; + SrcItem *pItem; pLoop = pWInfo->a[i].pWLoop; pItem = &pWInfo->pTabList->a[pLoop->iTab]; if( (pItem->fg.jointype & JT_LEFT)==0 ) continue; @@ -151318,7 +153449,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( for(ii=0, pLevel=pWInfo->a; ii<nTabList; ii++, pLevel++){ Table *pTab; /* Table to open */ int iDb; /* Index of database containing table/index */ - struct SrcList_item *pTabItem; + SrcItem *pTabItem; pTabItem = &pTabList->a[pLevel->iFrom]; pTab = pTabItem->pTab; @@ -151655,7 +153786,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ int k, last; VdbeOp *pOp, *pLastOp; Index *pIdx = 0; - struct SrcList_item *pTabItem = &pTabList->a[pLevel->iFrom]; + SrcItem *pTabItem = &pTabList->a[pLevel->iFrom]; Table *pTab = pTabItem->pTab; assert( pTab!=0 ); pLoop = pLevel->pWLoop; @@ -151731,7 +153862,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ #endif pOp = sqlite3VdbeGetOp(v, k); pLastOp = pOp + (last - k); - assert( pOp<pLastOp ); + assert( pOp<pLastOp || (pParse->nErr>0 && pOp==pLastOp) ); do{ if( pOp->p1!=pLevel->iTabCur ){ /* no-op */ @@ -153099,15 +155230,19 @@ SQLITE_PRIVATE void sqlite3WindowAttach(Parse *pParse, Expr *p, Window *pWin){ ** SELECT, or (b) the windows already linked use a compatible window frame. 
*/ SQLITE_PRIVATE void sqlite3WindowLink(Select *pSel, Window *pWin){ - if( pSel!=0 - && (0==pSel->pWin || 0==sqlite3WindowCompare(0, pSel->pWin, pWin, 0)) - ){ - pWin->pNextWin = pSel->pWin; - if( pSel->pWin ){ - pSel->pWin->ppThis = &pWin->pNextWin; + if( pSel ){ + if( 0==pSel->pWin || 0==sqlite3WindowCompare(0, pSel->pWin, pWin, 0) ){ + pWin->pNextWin = pSel->pWin; + if( pSel->pWin ){ + pSel->pWin->ppThis = &pWin->pNextWin; + } + pSel->pWin = pWin; + pWin->ppThis = &pSel->pWin; + }else{ + if( sqlite3ExprListCompare(pWin->pPartition, pSel->pWin->pPartition,-1) ){ + pSel->selFlags |= SF_MultiPart; + } } - pSel->pWin = pWin; - pWin->ppThis = &pSel->pWin; } } @@ -153260,6 +155395,7 @@ static void windowCheckValue(Parse *pParse, int reg, int eCond){ VdbeCoverageIf(v, eCond==2); } sqlite3VdbeAddOp3(v, aOp[eCond], regZero, sqlite3VdbeCurrentAddr(v)+2, reg); + sqlite3VdbeChangeP5(v, SQLITE_AFF_NUMERIC); VdbeCoverageNeverNullIf(v, eCond==0); /* NULL case captured by */ VdbeCoverageNeverNullIf(v, eCond==1); /* the OP_MustBeInt */ VdbeCoverageNeverNullIf(v, eCond==2); @@ -153856,6 +155992,7 @@ static void windowCodeRangeTest( int regString = ++pParse->nMem; /* Reg. for constant value '' */ int arith = OP_Add; /* OP_Add or OP_Subtract */ int addrGe; /* Jump destination */ + CollSeq *pColl; assert( op==OP_Ge || op==OP_Gt || op==OP_Le ); assert( pOrderBy && pOrderBy->nExpr==1 ); @@ -153946,6 +156083,8 @@ static void windowCodeRangeTest( ** control skips over this test if the BIGNULL flag is set and either ** reg1 or reg2 contain a NULL value. */ sqlite3VdbeAddOp3(v, op, reg2, lbl, reg1); VdbeCoverage(v); + pColl = sqlite3ExprNNCollSeq(pParse, pOrderBy->a[0].pExpr); + sqlite3VdbeAppendP4(v, (void*)pColl, P4_COLLSEQ); sqlite3VdbeChangeP5(v, SQLITE_NULLEQ); assert( op==OP_Ge || op==OP_Gt || op==OP_Lt || op==OP_Le ); @@ -154952,11 +157091,21 @@ static void updateDeleteLimitError( static void parserDoubleLinkSelect(Parse *pParse, Select *p){ assert( p!=0 ); if( p->pPrior ){ - Select *pNext = 0, *pLoop; - int mxSelect, cnt = 0; - for(pLoop=p; pLoop; pNext=pLoop, pLoop=pLoop->pPrior, cnt++){ + Select *pNext = 0, *pLoop = p; + int mxSelect, cnt = 1; + while(1){ pLoop->pNext = pNext; pLoop->selFlags |= SF_Compound; + pNext = pLoop; + pLoop = pLoop->pPrior; + if( pLoop==0 ) break; + cnt++; + if( pLoop->pOrderBy || pLoop->pLimit ){ + sqlite3ErrorMsg(pParse,"%s clause should come after %s not before", + pLoop->pOrderBy!=0 ? "ORDER BY" : "LIMIT", + sqlite3SelectOpName(pNext->op)); + break; + } } if( (p->selFlags & SF_MultiValue)==0 && (mxSelect = pParse->db->aLimit[SQLITE_LIMIT_COMPOUND_SELECT])>0 && @@ -154967,6 +157116,19 @@ static void updateDeleteLimitError( } } + /* Attach a With object describing the WITH clause to a Select + ** object describing the query for which the WITH clause is a prefix. + */ + static Select *attachWithToSelect(Parse *pParse, Select *pSelect, With *pWith){ + if( pSelect ){ + pSelect->pWith = pWith; + parserDoubleLinkSelect(pParse, pSelect); + }else{ + sqlite3WithDelete(pParse->db, pWith); + } + return pSelect; + } + /* Construct a new Expr object from a single identifier. Use the ** new Expr to populate pOut. 
Set the span of pOut to be the identifier @@ -155142,90 +157304,92 @@ static void updateDeleteLimitError( #define TK_TIES 94 #define TK_GENERATED 95 #define TK_ALWAYS 96 -#define TK_REINDEX 97 -#define TK_RENAME 98 -#define TK_CTIME_KW 99 -#define TK_ANY 100 -#define TK_BITAND 101 -#define TK_BITOR 102 -#define TK_LSHIFT 103 -#define TK_RSHIFT 104 -#define TK_PLUS 105 -#define TK_MINUS 106 -#define TK_STAR 107 -#define TK_SLASH 108 -#define TK_REM 109 -#define TK_CONCAT 110 -#define TK_COLLATE 111 -#define TK_BITNOT 112 -#define TK_ON 113 -#define TK_INDEXED 114 -#define TK_STRING 115 -#define TK_JOIN_KW 116 -#define TK_CONSTRAINT 117 -#define TK_DEFAULT 118 -#define TK_NULL 119 -#define TK_PRIMARY 120 -#define TK_UNIQUE 121 -#define TK_CHECK 122 -#define TK_REFERENCES 123 -#define TK_AUTOINCR 124 -#define TK_INSERT 125 -#define TK_DELETE 126 -#define TK_UPDATE 127 -#define TK_SET 128 -#define TK_DEFERRABLE 129 -#define TK_FOREIGN 130 -#define TK_DROP 131 -#define TK_UNION 132 -#define TK_ALL 133 -#define TK_EXCEPT 134 -#define TK_INTERSECT 135 -#define TK_SELECT 136 -#define TK_VALUES 137 -#define TK_DISTINCT 138 -#define TK_DOT 139 -#define TK_FROM 140 -#define TK_JOIN 141 -#define TK_USING 142 -#define TK_ORDER 143 -#define TK_GROUP 144 -#define TK_HAVING 145 -#define TK_LIMIT 146 -#define TK_WHERE 147 -#define TK_INTO 148 -#define TK_NOTHING 149 -#define TK_FLOAT 150 -#define TK_BLOB 151 -#define TK_INTEGER 152 -#define TK_VARIABLE 153 -#define TK_CASE 154 -#define TK_WHEN 155 -#define TK_THEN 156 -#define TK_ELSE 157 -#define TK_INDEX 158 -#define TK_ALTER 159 -#define TK_ADD 160 -#define TK_WINDOW 161 -#define TK_OVER 162 -#define TK_FILTER 163 -#define TK_COLUMN 164 -#define TK_AGG_FUNCTION 165 -#define TK_AGG_COLUMN 166 -#define TK_TRUEFALSE 167 -#define TK_ISNOT 168 -#define TK_FUNCTION 169 -#define TK_UMINUS 170 -#define TK_UPLUS 171 -#define TK_TRUTH 172 -#define TK_REGISTER 173 -#define TK_VECTOR 174 -#define TK_SELECT_COLUMN 175 -#define TK_IF_NULL_ROW 176 -#define TK_ASTERISK 177 -#define TK_SPAN 178 -#define TK_SPACE 179 -#define TK_ILLEGAL 180 +#define TK_MATERIALIZED 97 +#define TK_REINDEX 98 +#define TK_RENAME 99 +#define TK_CTIME_KW 100 +#define TK_ANY 101 +#define TK_BITAND 102 +#define TK_BITOR 103 +#define TK_LSHIFT 104 +#define TK_RSHIFT 105 +#define TK_PLUS 106 +#define TK_MINUS 107 +#define TK_STAR 108 +#define TK_SLASH 109 +#define TK_REM 110 +#define TK_CONCAT 111 +#define TK_COLLATE 112 +#define TK_BITNOT 113 +#define TK_ON 114 +#define TK_INDEXED 115 +#define TK_STRING 116 +#define TK_JOIN_KW 117 +#define TK_CONSTRAINT 118 +#define TK_DEFAULT 119 +#define TK_NULL 120 +#define TK_PRIMARY 121 +#define TK_UNIQUE 122 +#define TK_CHECK 123 +#define TK_REFERENCES 124 +#define TK_AUTOINCR 125 +#define TK_INSERT 126 +#define TK_DELETE 127 +#define TK_UPDATE 128 +#define TK_SET 129 +#define TK_DEFERRABLE 130 +#define TK_FOREIGN 131 +#define TK_DROP 132 +#define TK_UNION 133 +#define TK_ALL 134 +#define TK_EXCEPT 135 +#define TK_INTERSECT 136 +#define TK_SELECT 137 +#define TK_VALUES 138 +#define TK_DISTINCT 139 +#define TK_DOT 140 +#define TK_FROM 141 +#define TK_JOIN 142 +#define TK_USING 143 +#define TK_ORDER 144 +#define TK_GROUP 145 +#define TK_HAVING 146 +#define TK_LIMIT 147 +#define TK_WHERE 148 +#define TK_RETURNING 149 +#define TK_INTO 150 +#define TK_NOTHING 151 +#define TK_FLOAT 152 +#define TK_BLOB 153 +#define TK_INTEGER 154 +#define TK_VARIABLE 155 +#define TK_CASE 156 +#define TK_WHEN 157 +#define TK_THEN 158 +#define TK_ELSE 159 +#define TK_INDEX 160 
+#define TK_ALTER 161 +#define TK_ADD 162 +#define TK_WINDOW 163 +#define TK_OVER 164 +#define TK_FILTER 165 +#define TK_COLUMN 166 +#define TK_AGG_FUNCTION 167 +#define TK_AGG_COLUMN 168 +#define TK_TRUEFALSE 169 +#define TK_ISNOT 170 +#define TK_FUNCTION 171 +#define TK_UMINUS 172 +#define TK_UPLUS 173 +#define TK_TRUTH 174 +#define TK_REGISTER 175 +#define TK_VECTOR 176 +#define TK_SELECT_COLUMN 177 +#define TK_IF_NULL_ROW 178 +#define TK_ASTERISK 179 +#define TK_SPAN 180 +#define TK_SPACE 181 +#define TK_ILLEGAL 182 #endif /**************** End token definitions ***************************************/ @@ -155285,28 +157449,29 @@ static void updateDeleteLimitError( #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 310 +#define YYNOCODE 316 #define YYACTIONTYPE unsigned short int -#define YYWILDCARD 100 +#define YYWILDCARD 101 #define sqlite3ParserTOKENTYPE Token typedef union { int yyinit; sqlite3ParserTOKENTYPE yy0; - SrcList* yy47; - u8 yy58; - struct FrameBound yy77; - With* yy131; - int yy192; - Expr* yy202; - struct {int value; int mask;} yy207; - struct TrigEvent yy230; - ExprList* yy242; - Window* yy303; - Upsert* yy318; - const char* yy436; - TriggerStep* yy447; - Select* yy539; - IdList* yy600; + Window* yy19; + struct TrigEvent yy50; + int yy60; + struct FrameBound yy113; + Upsert* yy178; + With* yy195; + IdList* yy288; + SrcList* yy291; + Select* yy307; + ExprList* yy338; + TriggerStep* yy483; + const char* yy528; + u8 yy570; + Expr* yy602; + Cte* yy607; + struct {int value; int mask;} yy615; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -155322,18 +157487,18 @@ typedef union { #define sqlite3ParserCTX_FETCH Parse *pParse=yypParser->pParse; #define sqlite3ParserCTX_STORE yypParser->pParse=pParse; #define YYFALLBACK 1 -#define YYNSTATE 553 -#define YYNRULE 385 -#define YYNRULE_WITH_ACTION 325 -#define YYNTOKEN 181 -#define YY_MAX_SHIFT 552 -#define YY_MIN_SHIFTREDUCE 803 -#define YY_MAX_SHIFTREDUCE 1187 -#define YY_ERROR_ACTION 1188 -#define YY_ACCEPT_ACTION 1189 -#define YY_NO_ACTION 1190 -#define YY_MIN_REDUCE 1191 -#define YY_MAX_REDUCE 1575 +#define YYNSTATE 570 +#define YYNRULE 398 +#define YYNRULE_WITH_ACTION 337 +#define YYNTOKEN 183 +#define YY_MAX_SHIFT 569 +#define YY_MIN_SHIFTREDUCE 825 +#define YY_MAX_SHIFTREDUCE 1222 +#define YY_ERROR_ACTION 1223 +#define YY_ACCEPT_ACTION 1224 +#define YY_NO_ACTION 1225 +#define YY_MIN_REDUCE 1226 +#define YY_MAX_REDUCE 1623 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -155400,586 +157565,600 @@ typedef union { ** yy_default[] Default action for each state. 
** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (1962) +#define YY_ACTTAB_COUNT (2020) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 546, 1222, 546, 451, 1260, 546, 1239, 546, 114, 111, - /* 10 */ 211, 546, 1537, 546, 1260, 523, 114, 111, 211, 392, - /* 20 */ 1232, 344, 42, 42, 42, 42, 1225, 42, 42, 71, - /* 30 */ 71, 937, 1224, 71, 71, 71, 71, 1462, 1493, 938, - /* 40 */ 820, 453, 6, 121, 122, 112, 1165, 1165, 1006, 1009, - /* 50 */ 999, 999, 119, 119, 120, 120, 120, 120, 1543, 392, - /* 60 */ 1358, 1517, 552, 2, 1193, 194, 528, 436, 143, 291, - /* 70 */ 528, 136, 528, 371, 261, 504, 272, 385, 1273, 527, - /* 80 */ 503, 493, 164, 121, 122, 112, 1165, 1165, 1006, 1009, - /* 90 */ 999, 999, 119, 119, 120, 120, 120, 120, 1358, 442, - /* 100 */ 1514, 118, 118, 118, 118, 117, 117, 116, 116, 116, - /* 110 */ 115, 424, 266, 266, 266, 266, 1498, 358, 1500, 435, - /* 120 */ 357, 1498, 517, 524, 1485, 543, 1114, 543, 1114, 392, - /* 130 */ 405, 241, 208, 114, 111, 211, 98, 290, 537, 221, - /* 140 */ 1029, 118, 118, 118, 118, 117, 117, 116, 116, 116, - /* 150 */ 115, 424, 1142, 121, 122, 112, 1165, 1165, 1006, 1009, - /* 160 */ 999, 999, 119, 119, 120, 120, 120, 120, 406, 428, - /* 170 */ 117, 117, 116, 116, 116, 115, 424, 1418, 468, 123, - /* 180 */ 118, 118, 118, 118, 117, 117, 116, 116, 116, 115, - /* 190 */ 424, 116, 116, 116, 115, 424, 540, 540, 540, 392, - /* 200 */ 505, 120, 120, 120, 120, 113, 1051, 1142, 1143, 1144, - /* 210 */ 1051, 118, 118, 118, 118, 117, 117, 116, 116, 116, - /* 220 */ 115, 424, 1461, 121, 122, 112, 1165, 1165, 1006, 1009, - /* 230 */ 999, 999, 119, 119, 120, 120, 120, 120, 392, 444, - /* 240 */ 316, 83, 463, 81, 359, 382, 1142, 80, 118, 118, - /* 250 */ 118, 118, 117, 117, 116, 116, 116, 115, 424, 179, - /* 260 */ 434, 424, 121, 122, 112, 1165, 1165, 1006, 1009, 999, - /* 270 */ 999, 119, 119, 120, 120, 120, 120, 434, 433, 266, - /* 280 */ 266, 118, 118, 118, 118, 117, 117, 116, 116, 116, - /* 290 */ 115, 424, 543, 1109, 903, 506, 1142, 114, 111, 211, - /* 300 */ 1431, 1142, 1143, 1144, 206, 491, 1109, 392, 449, 1109, - /* 310 */ 545, 330, 120, 120, 120, 120, 298, 1431, 1433, 17, - /* 320 */ 118, 118, 118, 118, 117, 117, 116, 116, 116, 115, - /* 330 */ 424, 121, 122, 112, 1165, 1165, 1006, 1009, 999, 999, - /* 340 */ 119, 119, 120, 120, 120, 120, 392, 1358, 434, 1142, - /* 350 */ 482, 1142, 1143, 1144, 996, 996, 1007, 1010, 445, 118, - /* 360 */ 118, 118, 118, 117, 117, 116, 116, 116, 115, 424, - /* 370 */ 121, 122, 112, 1165, 1165, 1006, 1009, 999, 999, 119, - /* 380 */ 119, 120, 120, 120, 120, 1054, 1054, 465, 1431, 118, - /* 390 */ 118, 118, 118, 117, 117, 116, 116, 116, 115, 424, - /* 400 */ 1142, 451, 546, 1426, 1142, 1143, 1144, 233, 966, 1142, - /* 410 */ 481, 478, 477, 171, 360, 392, 164, 407, 414, 842, - /* 420 */ 476, 164, 185, 334, 71, 71, 1243, 1000, 118, 118, - /* 430 */ 118, 118, 117, 117, 116, 116, 116, 115, 424, 121, - /* 440 */ 122, 112, 1165, 1165, 1006, 1009, 999, 999, 119, 119, - /* 450 */ 120, 120, 120, 120, 392, 1142, 1143, 1144, 835, 12, - /* 460 */ 314, 509, 163, 356, 1142, 1143, 1144, 114, 111, 211, - /* 470 */ 508, 290, 537, 546, 276, 180, 290, 537, 121, 122, - /* 480 */ 112, 1165, 1165, 1006, 1009, 999, 999, 119, 119, 120, - /* 490 */ 120, 120, 120, 345, 484, 71, 71, 118, 118, 118, - /* 500 */ 118, 117, 117, 116, 116, 116, 115, 424, 1142, 209, - /* 510 */ 411, 523, 1142, 1109, 1571, 378, 252, 269, 342, 487, - /* 520 */ 337, 486, 238, 392, 513, 364, 1109, 1127, 333, 1109, - /* 
530 */ 191, 409, 286, 32, 457, 443, 118, 118, 118, 118, - /* 540 */ 117, 117, 116, 116, 116, 115, 424, 121, 122, 112, - /* 550 */ 1165, 1165, 1006, 1009, 999, 999, 119, 119, 120, 120, - /* 560 */ 120, 120, 392, 1142, 1143, 1144, 987, 1142, 1143, 1144, - /* 570 */ 1142, 233, 492, 1492, 481, 478, 477, 6, 163, 546, - /* 580 */ 512, 546, 115, 424, 476, 5, 121, 122, 112, 1165, - /* 590 */ 1165, 1006, 1009, 999, 999, 119, 119, 120, 120, 120, - /* 600 */ 120, 13, 13, 13, 13, 118, 118, 118, 118, 117, - /* 610 */ 117, 116, 116, 116, 115, 424, 403, 502, 408, 546, - /* 620 */ 1486, 544, 1142, 892, 892, 1142, 1143, 1144, 1473, 1142, - /* 630 */ 275, 392, 808, 809, 810, 971, 422, 422, 422, 16, - /* 640 */ 16, 55, 55, 1242, 118, 118, 118, 118, 117, 117, - /* 650 */ 116, 116, 116, 115, 424, 121, 122, 112, 1165, 1165, - /* 660 */ 1006, 1009, 999, 999, 119, 119, 120, 120, 120, 120, - /* 670 */ 392, 1189, 1, 1, 552, 2, 1193, 1142, 1143, 1144, - /* 680 */ 194, 291, 898, 136, 1142, 1143, 1144, 897, 521, 1492, - /* 690 */ 1273, 3, 380, 6, 121, 122, 112, 1165, 1165, 1006, - /* 700 */ 1009, 999, 999, 119, 119, 120, 120, 120, 120, 858, - /* 710 */ 546, 924, 546, 118, 118, 118, 118, 117, 117, 116, - /* 720 */ 116, 116, 115, 424, 266, 266, 1092, 1569, 1142, 551, - /* 730 */ 1569, 1193, 13, 13, 13, 13, 291, 543, 136, 392, - /* 740 */ 485, 421, 420, 966, 344, 1273, 468, 410, 859, 279, - /* 750 */ 140, 221, 118, 118, 118, 118, 117, 117, 116, 116, - /* 760 */ 116, 115, 424, 121, 122, 112, 1165, 1165, 1006, 1009, - /* 770 */ 999, 999, 119, 119, 120, 120, 120, 120, 546, 266, - /* 780 */ 266, 428, 392, 1142, 1143, 1144, 1172, 830, 1172, 468, - /* 790 */ 431, 145, 543, 1146, 401, 314, 439, 302, 838, 1490, - /* 800 */ 71, 71, 412, 6, 1090, 473, 221, 100, 112, 1165, - /* 810 */ 1165, 1006, 1009, 999, 999, 119, 119, 120, 120, 120, - /* 820 */ 120, 118, 118, 118, 118, 117, 117, 116, 116, 116, - /* 830 */ 115, 424, 237, 1425, 546, 451, 428, 287, 986, 546, - /* 840 */ 236, 235, 234, 830, 97, 529, 429, 1265, 1265, 1146, - /* 850 */ 494, 307, 430, 838, 977, 546, 71, 71, 976, 1241, - /* 860 */ 546, 51, 51, 300, 118, 118, 118, 118, 117, 117, - /* 870 */ 116, 116, 116, 115, 424, 194, 103, 70, 70, 266, - /* 880 */ 266, 546, 71, 71, 266, 266, 30, 391, 344, 976, - /* 890 */ 976, 978, 543, 528, 1109, 328, 392, 543, 495, 397, - /* 900 */ 1470, 195, 530, 13, 13, 1358, 240, 1109, 277, 280, - /* 910 */ 1109, 280, 304, 457, 306, 333, 392, 31, 188, 419, - /* 920 */ 121, 122, 112, 1165, 1165, 1006, 1009, 999, 999, 119, - /* 930 */ 119, 120, 120, 120, 120, 142, 392, 365, 457, 986, - /* 940 */ 121, 122, 112, 1165, 1165, 1006, 1009, 999, 999, 119, - /* 950 */ 119, 120, 120, 120, 120, 977, 323, 1142, 326, 976, - /* 960 */ 121, 110, 112, 1165, 1165, 1006, 1009, 999, 999, 119, - /* 970 */ 119, 120, 120, 120, 120, 464, 377, 1185, 118, 118, - /* 980 */ 118, 118, 117, 117, 116, 116, 116, 115, 424, 1142, - /* 990 */ 976, 976, 978, 305, 9, 366, 244, 362, 118, 118, - /* 1000 */ 118, 118, 117, 117, 116, 116, 116, 115, 424, 313, - /* 1010 */ 546, 344, 1142, 1143, 1144, 299, 290, 537, 118, 118, - /* 1020 */ 118, 118, 117, 117, 116, 116, 116, 115, 424, 1263, - /* 1030 */ 1263, 1163, 13, 13, 278, 421, 420, 468, 392, 923, - /* 1040 */ 260, 260, 289, 1169, 1142, 1143, 1144, 189, 1171, 266, - /* 1050 */ 266, 468, 390, 543, 1186, 546, 1170, 263, 144, 489, - /* 1060 */ 922, 546, 543, 122, 112, 1165, 1165, 1006, 1009, 999, - /* 1070 */ 999, 119, 119, 120, 120, 120, 120, 71, 71, 1142, - /* 1080 */ 1172, 1272, 1172, 13, 13, 898, 1070, 1163, 546, 468, - /* 1090 */ 897, 
107, 538, 1491, 4, 1268, 1109, 6, 525, 1049, - /* 1100 */ 12, 1071, 1092, 1570, 312, 455, 1570, 520, 541, 1109, - /* 1110 */ 56, 56, 1109, 1489, 423, 1358, 1072, 6, 345, 285, - /* 1120 */ 118, 118, 118, 118, 117, 117, 116, 116, 116, 115, - /* 1130 */ 424, 425, 1271, 321, 1142, 1143, 1144, 878, 266, 266, - /* 1140 */ 1277, 107, 538, 535, 4, 1488, 293, 879, 1211, 6, - /* 1150 */ 210, 543, 543, 164, 294, 496, 416, 204, 541, 267, - /* 1160 */ 267, 1214, 398, 511, 499, 204, 266, 266, 396, 531, - /* 1170 */ 8, 986, 543, 519, 546, 922, 458, 105, 105, 543, - /* 1180 */ 1090, 425, 266, 266, 106, 417, 425, 548, 547, 266, - /* 1190 */ 266, 976, 518, 535, 1373, 543, 15, 15, 266, 266, - /* 1200 */ 456, 1120, 543, 266, 266, 1070, 1372, 515, 290, 537, - /* 1210 */ 546, 543, 514, 97, 444, 316, 543, 546, 922, 125, - /* 1220 */ 1071, 986, 976, 976, 978, 979, 27, 105, 105, 401, - /* 1230 */ 343, 1511, 44, 44, 106, 1072, 425, 548, 547, 57, - /* 1240 */ 57, 976, 343, 1511, 107, 538, 546, 4, 462, 401, - /* 1250 */ 214, 1120, 459, 297, 377, 1091, 534, 1309, 546, 539, - /* 1260 */ 398, 541, 290, 537, 104, 244, 102, 526, 58, 58, - /* 1270 */ 546, 199, 976, 976, 978, 979, 27, 1516, 1131, 427, - /* 1280 */ 59, 59, 270, 237, 425, 138, 95, 375, 375, 374, - /* 1290 */ 255, 372, 60, 60, 817, 1180, 535, 546, 273, 546, - /* 1300 */ 1163, 1308, 389, 388, 546, 438, 546, 215, 210, 296, - /* 1310 */ 515, 849, 546, 265, 208, 516, 1476, 295, 274, 61, - /* 1320 */ 61, 62, 62, 308, 986, 109, 45, 45, 46, 46, - /* 1330 */ 105, 105, 1186, 922, 47, 47, 341, 106, 546, 425, - /* 1340 */ 548, 547, 1542, 546, 976, 867, 340, 217, 546, 937, - /* 1350 */ 397, 107, 538, 218, 4, 156, 1163, 938, 158, 546, - /* 1360 */ 49, 49, 1162, 546, 268, 50, 50, 546, 541, 1450, - /* 1370 */ 63, 63, 546, 1449, 216, 976, 976, 978, 979, 27, - /* 1380 */ 446, 64, 64, 546, 460, 65, 65, 546, 318, 14, - /* 1390 */ 14, 425, 1305, 546, 66, 66, 1087, 546, 141, 379, - /* 1400 */ 38, 546, 963, 535, 322, 127, 127, 546, 393, 67, - /* 1410 */ 67, 546, 325, 290, 537, 52, 52, 515, 546, 68, - /* 1420 */ 68, 845, 514, 69, 69, 399, 165, 857, 856, 53, - /* 1430 */ 53, 986, 311, 151, 151, 97, 432, 105, 105, 327, - /* 1440 */ 152, 152, 526, 1048, 106, 1048, 425, 548, 547, 1131, - /* 1450 */ 427, 976, 1032, 270, 968, 239, 329, 243, 375, 375, - /* 1460 */ 374, 255, 372, 940, 941, 817, 1296, 546, 220, 546, - /* 1470 */ 107, 538, 546, 4, 546, 1256, 199, 845, 215, 1036, - /* 1480 */ 296, 1530, 976, 976, 978, 979, 27, 541, 295, 76, - /* 1490 */ 76, 54, 54, 980, 72, 72, 128, 128, 864, 865, - /* 1500 */ 107, 538, 546, 4, 1047, 546, 1047, 533, 469, 546, - /* 1510 */ 425, 546, 450, 1240, 546, 243, 546, 541, 217, 546, - /* 1520 */ 452, 197, 535, 243, 73, 73, 156, 129, 129, 158, - /* 1530 */ 336, 130, 130, 126, 126, 1036, 150, 150, 149, 149, - /* 1540 */ 425, 134, 134, 317, 474, 216, 97, 239, 331, 980, - /* 1550 */ 986, 97, 535, 346, 347, 546, 105, 105, 902, 931, - /* 1560 */ 546, 895, 243, 106, 109, 425, 548, 547, 546, 1505, - /* 1570 */ 976, 828, 99, 538, 139, 4, 546, 133, 133, 393, - /* 1580 */ 986, 1317, 131, 131, 290, 537, 105, 105, 1357, 541, - /* 1590 */ 132, 132, 1292, 106, 1303, 425, 548, 547, 75, 75, - /* 1600 */ 976, 976, 976, 978, 979, 27, 546, 432, 896, 1289, - /* 1610 */ 532, 109, 425, 1363, 546, 1221, 1213, 1202, 258, 546, - /* 1620 */ 349, 546, 1201, 11, 535, 1203, 1524, 351, 77, 77, - /* 1630 */ 376, 976, 976, 978, 979, 27, 74, 74, 353, 213, - /* 1640 */ 301, 43, 43, 48, 48, 437, 310, 201, 303, 1350, - /* 1650 */ 315, 355, 986, 454, 479, 1239, 339, 192, 105, 105, - /* 1660 
*/ 1422, 1421, 193, 536, 205, 106, 1527, 425, 548, 547, - /* 1670 */ 1180, 167, 976, 270, 247, 1469, 1467, 1177, 375, 375, - /* 1680 */ 374, 255, 372, 200, 369, 817, 400, 83, 79, 82, - /* 1690 */ 1427, 448, 177, 95, 1342, 161, 169, 1339, 215, 440, - /* 1700 */ 296, 172, 173, 976, 976, 978, 979, 27, 295, 174, - /* 1710 */ 175, 441, 472, 223, 1347, 383, 35, 381, 36, 461, - /* 1720 */ 88, 1353, 181, 447, 384, 1416, 227, 467, 259, 229, - /* 1730 */ 186, 488, 470, 324, 1250, 230, 231, 320, 217, 1204, - /* 1740 */ 1438, 1259, 386, 1258, 413, 90, 156, 849, 1541, 158, - /* 1750 */ 206, 415, 1540, 507, 1300, 1257, 94, 348, 1229, 1301, - /* 1760 */ 387, 1510, 1228, 338, 1227, 216, 350, 1539, 498, 283, - /* 1770 */ 284, 1249, 501, 1299, 352, 245, 246, 418, 1298, 354, - /* 1780 */ 1496, 1495, 124, 10, 526, 363, 101, 1324, 253, 96, - /* 1790 */ 510, 1210, 34, 549, 1137, 254, 256, 257, 166, 393, - /* 1800 */ 550, 1199, 1282, 361, 290, 537, 1281, 196, 367, 368, - /* 1810 */ 1194, 153, 1454, 137, 281, 1323, 1455, 804, 154, 426, - /* 1820 */ 198, 155, 1453, 1452, 292, 212, 202, 432, 1402, 203, - /* 1830 */ 271, 135, 288, 78, 1046, 1044, 960, 168, 157, 881, - /* 1840 */ 170, 219, 309, 222, 1060, 176, 964, 159, 402, 84, - /* 1850 */ 178, 404, 85, 86, 87, 160, 1063, 224, 394, 395, - /* 1860 */ 225, 1059, 146, 18, 226, 319, 243, 1174, 466, 228, - /* 1870 */ 1052, 182, 183, 37, 819, 471, 340, 232, 332, 483, - /* 1880 */ 184, 89, 162, 19, 20, 475, 91, 480, 847, 335, - /* 1890 */ 147, 860, 282, 92, 490, 93, 1125, 148, 1012, 1095, - /* 1900 */ 39, 497, 1096, 40, 500, 262, 207, 264, 930, 187, - /* 1910 */ 925, 109, 1111, 1115, 1113, 7, 1099, 242, 33, 1119, - /* 1920 */ 21, 522, 22, 23, 24, 1118, 25, 190, 97, 26, - /* 1930 */ 1027, 1013, 1011, 1015, 1069, 1016, 1068, 249, 248, 28, - /* 1940 */ 41, 891, 981, 829, 108, 29, 250, 542, 251, 370, - /* 1950 */ 373, 1133, 1132, 1190, 1190, 1190, 1190, 1190, 1190, 1190, - /* 1960 */ 1532, 1531, + /* 0 */ 563, 1295, 563, 1274, 168, 361, 115, 112, 218, 373, + /* 10 */ 563, 1295, 374, 563, 488, 563, 115, 112, 218, 406, + /* 20 */ 1300, 1300, 41, 41, 41, 41, 514, 1504, 520, 1298, + /* 30 */ 1298, 959, 41, 41, 1257, 71, 71, 51, 51, 960, + /* 40 */ 557, 557, 557, 122, 123, 113, 1200, 1200, 1035, 1038, + /* 50 */ 1028, 1028, 120, 120, 121, 121, 121, 121, 414, 406, + /* 60 */ 273, 273, 273, 273, 115, 112, 218, 115, 112, 218, + /* 70 */ 197, 268, 545, 560, 515, 560, 1260, 563, 385, 248, + /* 80 */ 215, 521, 399, 122, 123, 113, 1200, 1200, 1035, 1038, + /* 90 */ 1028, 1028, 120, 120, 121, 121, 121, 121, 540, 13, + /* 100 */ 13, 1259, 119, 119, 119, 119, 118, 118, 117, 117, + /* 110 */ 117, 116, 441, 1176, 419, 1531, 446, 137, 512, 1539, + /* 120 */ 1545, 372, 1547, 6, 371, 1176, 1148, 1584, 1148, 406, + /* 130 */ 1545, 534, 115, 112, 218, 1267, 99, 441, 121, 121, + /* 140 */ 121, 121, 119, 119, 119, 119, 118, 118, 117, 117, + /* 150 */ 117, 116, 441, 122, 123, 113, 1200, 1200, 1035, 1038, + /* 160 */ 1028, 1028, 120, 120, 121, 121, 121, 121, 197, 1176, + /* 170 */ 1177, 1178, 241, 304, 554, 501, 498, 497, 473, 124, + /* 180 */ 394, 1176, 1177, 1178, 1176, 496, 119, 119, 119, 119, + /* 190 */ 118, 118, 117, 117, 117, 116, 441, 139, 540, 406, + /* 200 */ 121, 121, 121, 121, 114, 117, 117, 117, 116, 441, + /* 210 */ 541, 1532, 119, 119, 119, 119, 118, 118, 117, 117, + /* 220 */ 117, 116, 441, 122, 123, 113, 1200, 1200, 1035, 1038, + /* 230 */ 1028, 1028, 120, 120, 121, 121, 121, 121, 406, 320, + /* 240 */ 1176, 1177, 1178, 81, 342, 1590, 396, 80, 119, 119, + /* 250 */ 119, 119, 118, 118, 
117, 117, 117, 116, 441, 1176, + /* 260 */ 211, 450, 122, 123, 113, 1200, 1200, 1035, 1038, 1028, + /* 270 */ 1028, 120, 120, 121, 121, 121, 121, 251, 450, 449, + /* 280 */ 273, 273, 119, 119, 119, 119, 118, 118, 117, 117, + /* 290 */ 117, 116, 441, 560, 1224, 1, 1, 569, 2, 1228, + /* 300 */ 317, 1176, 319, 1561, 305, 337, 140, 340, 406, 430, + /* 310 */ 469, 1533, 1197, 1308, 348, 1176, 1177, 1178, 168, 462, + /* 320 */ 330, 119, 119, 119, 119, 118, 118, 117, 117, 117, + /* 330 */ 116, 441, 122, 123, 113, 1200, 1200, 1035, 1038, 1028, + /* 340 */ 1028, 120, 120, 121, 121, 121, 121, 273, 273, 563, + /* 350 */ 83, 450, 416, 1564, 569, 2, 1228, 1176, 1177, 1178, + /* 360 */ 560, 305, 471, 140, 944, 995, 860, 563, 467, 1197, + /* 370 */ 1308, 13, 13, 137, 229, 118, 118, 117, 117, 117, + /* 380 */ 116, 441, 96, 318, 946, 504, 424, 361, 562, 71, + /* 390 */ 71, 119, 119, 119, 119, 118, 118, 117, 117, 117, + /* 400 */ 116, 441, 427, 205, 273, 273, 445, 1015, 259, 276, + /* 410 */ 356, 507, 351, 506, 246, 406, 959, 560, 328, 344, + /* 420 */ 347, 315, 860, 1006, 960, 126, 545, 1005, 313, 304, + /* 430 */ 554, 229, 538, 1539, 148, 544, 281, 6, 203, 122, + /* 440 */ 123, 113, 1200, 1200, 1035, 1038, 1028, 1028, 120, 120, + /* 450 */ 121, 121, 121, 121, 563, 217, 563, 12, 406, 1005, + /* 460 */ 1005, 1007, 502, 445, 119, 119, 119, 119, 118, 118, + /* 470 */ 117, 117, 117, 116, 441, 452, 71, 71, 70, 70, + /* 480 */ 944, 137, 122, 123, 113, 1200, 1200, 1035, 1038, 1028, + /* 490 */ 1028, 120, 120, 121, 121, 121, 121, 1530, 119, 119, + /* 500 */ 119, 119, 118, 118, 117, 117, 117, 116, 441, 403, + /* 510 */ 402, 241, 1176, 545, 501, 498, 497, 1468, 1143, 451, + /* 520 */ 267, 267, 513, 1540, 496, 142, 1176, 6, 406, 530, + /* 530 */ 194, 1143, 864, 560, 1143, 461, 182, 304, 554, 32, + /* 540 */ 379, 119, 119, 119, 119, 118, 118, 117, 117, 117, + /* 550 */ 116, 441, 122, 123, 113, 1200, 1200, 1035, 1038, 1028, + /* 560 */ 1028, 120, 120, 121, 121, 121, 121, 406, 1176, 1177, + /* 570 */ 1178, 857, 568, 1176, 1228, 925, 1176, 454, 361, 305, + /* 580 */ 189, 140, 1176, 1177, 1178, 519, 529, 404, 1308, 183, + /* 590 */ 1015, 122, 123, 113, 1200, 1200, 1035, 1038, 1028, 1028, + /* 600 */ 120, 120, 121, 121, 121, 121, 1006, 16, 16, 370, + /* 610 */ 1005, 119, 119, 119, 119, 118, 118, 117, 117, 117, + /* 620 */ 116, 441, 273, 273, 1537, 150, 1176, 98, 6, 1176, + /* 630 */ 1177, 1178, 1176, 1177, 1178, 560, 380, 406, 376, 438, + /* 640 */ 437, 1161, 1005, 1005, 1007, 1025, 1025, 1036, 1039, 229, + /* 650 */ 119, 119, 119, 119, 118, 118, 117, 117, 117, 116, + /* 660 */ 441, 122, 123, 113, 1200, 1200, 1035, 1038, 1028, 1028, + /* 670 */ 120, 120, 121, 121, 121, 121, 406, 1143, 1619, 392, + /* 680 */ 1016, 445, 1176, 1177, 1178, 1207, 525, 1207, 1530, 995, + /* 690 */ 1143, 304, 554, 1143, 5, 563, 543, 3, 361, 216, + /* 700 */ 122, 123, 113, 1200, 1200, 1035, 1038, 1028, 1028, 120, + /* 710 */ 120, 121, 121, 121, 121, 143, 563, 13, 13, 1029, + /* 720 */ 119, 119, 119, 119, 118, 118, 117, 117, 117, 116, + /* 730 */ 441, 1176, 426, 563, 1176, 563, 274, 274, 13, 13, + /* 740 */ 1078, 1176, 328, 457, 316, 147, 406, 211, 361, 560, + /* 750 */ 1000, 213, 511, 293, 477, 55, 55, 71, 71, 119, + /* 760 */ 119, 119, 119, 118, 118, 117, 117, 117, 116, 441, + /* 770 */ 122, 123, 113, 1200, 1200, 1035, 1038, 1028, 1028, 120, + /* 780 */ 120, 121, 121, 121, 121, 406, 455, 1176, 1177, 1178, + /* 790 */ 1176, 1177, 1178, 471, 526, 149, 404, 1176, 1177, 1178, + /* 800 */ 105, 270, 103, 563, 944, 563, 116, 441, 1530, 122, + /* 810 */ 
123, 113, 1200, 1200, 1035, 1038, 1028, 1028, 120, 120, + /* 820 */ 121, 121, 121, 121, 945, 13, 13, 13, 13, 119, + /* 830 */ 119, 119, 119, 118, 118, 117, 117, 117, 116, 441, + /* 840 */ 191, 563, 192, 563, 416, 439, 439, 439, 1083, 1083, + /* 850 */ 485, 561, 285, 914, 914, 406, 462, 330, 1530, 830, + /* 860 */ 831, 832, 206, 71, 71, 71, 71, 286, 119, 119, + /* 870 */ 119, 119, 118, 118, 117, 117, 117, 116, 441, 122, + /* 880 */ 123, 113, 1200, 1200, 1035, 1038, 1028, 1028, 120, 120, + /* 890 */ 121, 121, 121, 121, 563, 217, 563, 1122, 1617, 406, + /* 900 */ 300, 1617, 301, 416, 1278, 1473, 244, 243, 242, 1249, + /* 910 */ 412, 556, 412, 282, 842, 279, 71, 71, 71, 71, + /* 920 */ 944, 1415, 1473, 1475, 101, 113, 1200, 1200, 1035, 1038, + /* 930 */ 1028, 1028, 120, 120, 121, 121, 121, 121, 119, 119, + /* 940 */ 119, 119, 118, 118, 117, 117, 117, 116, 441, 273, + /* 950 */ 273, 1099, 563, 436, 1143, 440, 563, 1122, 1618, 357, + /* 960 */ 1558, 1618, 560, 546, 488, 197, 1100, 1143, 378, 290, + /* 970 */ 1143, 1306, 284, 460, 71, 71, 1120, 405, 13, 13, + /* 980 */ 145, 1101, 119, 119, 119, 119, 118, 118, 117, 117, + /* 990 */ 117, 116, 441, 542, 104, 1473, 509, 273, 273, 294, + /* 1000 */ 1514, 294, 900, 273, 273, 273, 273, 563, 1503, 563, + /* 1010 */ 560, 545, 901, 464, 406, 1058, 560, 852, 560, 198, + /* 1020 */ 547, 1080, 920, 404, 1400, 1080, 146, 919, 38, 56, + /* 1030 */ 56, 15, 15, 563, 406, 12, 1120, 471, 122, 123, + /* 1040 */ 113, 1200, 1200, 1035, 1038, 1028, 1028, 120, 120, 121, + /* 1050 */ 121, 121, 121, 1460, 406, 43, 43, 483, 122, 123, + /* 1060 */ 113, 1200, 1200, 1035, 1038, 1028, 1028, 120, 120, 121, + /* 1070 */ 121, 121, 121, 563, 852, 9, 471, 251, 122, 111, + /* 1080 */ 113, 1200, 1200, 1035, 1038, 1028, 1028, 120, 120, 121, + /* 1090 */ 121, 121, 121, 563, 421, 57, 57, 119, 119, 119, + /* 1100 */ 119, 118, 118, 117, 117, 117, 116, 441, 1176, 493, + /* 1110 */ 563, 289, 1197, 478, 1516, 44, 44, 119, 119, 119, + /* 1120 */ 119, 118, 118, 117, 117, 117, 116, 441, 880, 563, + /* 1130 */ 536, 563, 58, 58, 488, 1414, 245, 119, 119, 119, + /* 1140 */ 119, 118, 118, 117, 117, 117, 116, 441, 563, 535, + /* 1150 */ 291, 59, 59, 60, 60, 438, 437, 406, 1154, 505, + /* 1160 */ 304, 554, 477, 1204, 1176, 1177, 1178, 881, 1206, 1197, + /* 1170 */ 61, 61, 1246, 357, 1558, 1538, 1205, 563, 1467, 6, + /* 1180 */ 1176, 488, 123, 113, 1200, 1200, 1035, 1038, 1028, 1028, + /* 1190 */ 120, 120, 121, 121, 121, 121, 1400, 1143, 410, 62, + /* 1200 */ 62, 1207, 1099, 1207, 411, 447, 273, 273, 537, 1154, + /* 1210 */ 1143, 108, 555, 1143, 4, 391, 1220, 1100, 1512, 560, + /* 1220 */ 347, 516, 428, 548, 308, 1307, 1536, 1077, 558, 1077, + /* 1230 */ 6, 488, 1101, 1400, 488, 309, 1176, 1177, 1178, 563, + /* 1240 */ 119, 119, 119, 119, 118, 118, 117, 117, 117, 116, + /* 1250 */ 441, 442, 278, 551, 563, 273, 273, 273, 273, 563, + /* 1260 */ 327, 45, 45, 552, 563, 528, 422, 563, 560, 1400, + /* 1270 */ 560, 108, 555, 137, 4, 1303, 46, 46, 335, 563, + /* 1280 */ 482, 47, 47, 477, 479, 307, 49, 49, 558, 50, + /* 1290 */ 50, 563, 1015, 563, 1221, 563, 1400, 563, 106, 106, + /* 1300 */ 8, 63, 63, 423, 563, 107, 312, 442, 565, 564, + /* 1310 */ 563, 442, 1005, 64, 64, 65, 65, 14, 14, 66, + /* 1320 */ 66, 391, 1121, 552, 1312, 1180, 128, 128, 563, 304, + /* 1330 */ 554, 563, 67, 67, 563, 359, 560, 532, 563, 484, + /* 1340 */ 563, 1196, 531, 222, 1005, 1005, 1007, 1008, 27, 522, + /* 1350 */ 52, 52, 1015, 68, 68, 563, 69, 69, 106, 106, + /* 1360 */ 53, 53, 156, 156, 563, 107, 434, 442, 565, 564, + /* 1370 
*/ 272, 215, 1005, 425, 563, 359, 563, 157, 157, 563, + /* 1380 */ 1535, 292, 1180, 98, 6, 1344, 76, 76, 1215, 475, + /* 1390 */ 413, 169, 226, 563, 245, 563, 54, 54, 72, 72, + /* 1400 */ 1221, 129, 129, 1343, 1005, 1005, 1007, 1008, 27, 1563, + /* 1410 */ 1165, 444, 456, 433, 277, 73, 73, 130, 130, 389, + /* 1420 */ 389, 388, 262, 386, 1165, 444, 839, 1519, 277, 108, + /* 1430 */ 555, 321, 4, 389, 389, 388, 262, 386, 563, 223, + /* 1440 */ 839, 311, 468, 84, 202, 523, 558, 1492, 303, 310, + /* 1450 */ 563, 110, 404, 223, 563, 311, 206, 30, 404, 277, + /* 1460 */ 131, 131, 411, 310, 389, 389, 388, 262, 386, 442, + /* 1470 */ 920, 839, 127, 127, 563, 919, 155, 155, 1491, 225, + /* 1480 */ 563, 552, 871, 563, 223, 476, 311, 161, 31, 563, + /* 1490 */ 135, 563, 480, 225, 310, 532, 154, 154, 332, 17, + /* 1500 */ 533, 161, 136, 136, 135, 134, 134, 224, 228, 355, + /* 1510 */ 1015, 132, 132, 133, 133, 1589, 106, 106, 889, 354, + /* 1520 */ 563, 224, 563, 107, 225, 442, 565, 564, 1117, 275, + /* 1530 */ 1005, 393, 161, 518, 563, 135, 108, 555, 417, 4, + /* 1540 */ 1340, 407, 75, 75, 77, 77, 304, 554, 867, 563, + /* 1550 */ 336, 563, 224, 558, 463, 407, 74, 74, 465, 1065, + /* 1560 */ 304, 554, 1005, 1005, 1007, 1008, 27, 962, 963, 543, + /* 1570 */ 448, 42, 42, 48, 48, 326, 442, 325, 98, 997, + /* 1580 */ 470, 287, 250, 250, 448, 1009, 407, 472, 552, 339, + /* 1590 */ 250, 304, 554, 879, 878, 331, 108, 555, 98, 4, + /* 1600 */ 1277, 494, 532, 345, 247, 867, 98, 531, 341, 886, + /* 1610 */ 887, 1126, 1076, 558, 1076, 448, 1065, 1015, 1061, 953, + /* 1620 */ 343, 247, 250, 106, 106, 1291, 917, 1276, 850, 110, + /* 1630 */ 107, 144, 442, 565, 564, 918, 442, 1005, 110, 1275, + /* 1640 */ 350, 360, 1009, 1331, 1352, 299, 1399, 1577, 552, 1327, + /* 1650 */ 1552, 550, 1338, 549, 1405, 1256, 1248, 1237, 1236, 1238, + /* 1660 */ 1571, 489, 265, 200, 1324, 363, 365, 367, 11, 1005, + /* 1670 */ 1005, 1007, 1008, 27, 390, 221, 1386, 1015, 280, 1391, + /* 1680 */ 1381, 208, 323, 106, 106, 924, 1374, 453, 283, 324, + /* 1690 */ 107, 474, 442, 565, 564, 1390, 499, 1005, 212, 288, + /* 1700 */ 1274, 397, 353, 108, 555, 195, 4, 1464, 369, 1463, + /* 1710 */ 1574, 1215, 1212, 329, 553, 171, 207, 383, 1511, 196, + /* 1720 */ 558, 254, 1509, 415, 100, 555, 83, 4, 204, 1005, + /* 1730 */ 1005, 1007, 1008, 27, 219, 79, 82, 1469, 180, 166, + /* 1740 */ 173, 558, 458, 442, 175, 176, 177, 178, 35, 1387, + /* 1750 */ 492, 459, 231, 1395, 96, 552, 1393, 1392, 395, 184, + /* 1760 */ 481, 466, 36, 235, 442, 89, 398, 266, 487, 1480, + /* 1770 */ 1458, 237, 188, 338, 508, 429, 552, 490, 400, 238, + /* 1780 */ 334, 1239, 239, 1294, 1015, 1293, 1292, 1285, 91, 871, + /* 1790 */ 106, 106, 213, 431, 1588, 432, 524, 107, 517, 442, + /* 1800 */ 565, 564, 401, 1264, 1005, 1015, 1263, 1587, 352, 1262, + /* 1810 */ 1557, 106, 106, 1586, 1284, 297, 298, 358, 107, 1335, + /* 1820 */ 442, 565, 564, 95, 362, 1005, 253, 252, 435, 125, + /* 1830 */ 543, 10, 1444, 1543, 377, 1542, 1005, 1005, 1007, 1008, + /* 1840 */ 27, 302, 102, 97, 527, 1336, 260, 1317, 364, 1245, + /* 1850 */ 1334, 34, 566, 1171, 366, 381, 375, 1005, 1005, 1007, + /* 1860 */ 1008, 27, 1333, 1359, 368, 1316, 199, 382, 261, 263, + /* 1870 */ 264, 1358, 158, 1496, 141, 1497, 1495, 567, 1234, 1229, + /* 1880 */ 1494, 295, 159, 209, 210, 78, 826, 443, 201, 306, + /* 1890 */ 220, 1075, 138, 1073, 160, 314, 162, 172, 1196, 174, + /* 1900 */ 903, 227, 230, 322, 1089, 179, 163, 164, 418, 85, + /* 1910 */ 420, 181, 170, 408, 409, 86, 87, 165, 88, 1092, + /* 1920 */ 232, 233, 
1088, 151, 18, 234, 1081, 250, 333, 185, + /* 1930 */ 1209, 486, 236, 186, 37, 841, 491, 354, 240, 346, + /* 1940 */ 503, 187, 90, 167, 19, 495, 20, 869, 500, 349, + /* 1950 */ 92, 882, 296, 152, 93, 510, 1127, 1159, 153, 1041, + /* 1960 */ 214, 1128, 39, 94, 269, 271, 952, 190, 947, 110, + /* 1970 */ 1149, 1145, 1153, 249, 1133, 1147, 7, 33, 21, 193, + /* 1980 */ 22, 23, 24, 25, 1152, 539, 98, 1056, 26, 1042, + /* 1990 */ 1040, 1044, 1098, 1045, 1097, 256, 255, 28, 40, 387, + /* 2000 */ 1010, 851, 109, 29, 1167, 559, 384, 257, 913, 258, + /* 2010 */ 1166, 1579, 1225, 1225, 1225, 1225, 1225, 1225, 1225, 1578, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 189, 211, 189, 189, 218, 189, 220, 189, 267, 268, - /* 10 */ 269, 189, 210, 189, 228, 189, 267, 268, 269, 19, - /* 20 */ 218, 189, 211, 212, 211, 212, 211, 211, 212, 211, - /* 30 */ 212, 31, 211, 211, 212, 211, 212, 288, 300, 39, - /* 40 */ 21, 189, 304, 43, 44, 45, 46, 47, 48, 49, - /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 225, 19, - /* 60 */ 189, 183, 184, 185, 186, 189, 248, 263, 236, 191, - /* 70 */ 248, 193, 248, 197, 208, 257, 262, 201, 200, 257, - /* 80 */ 200, 257, 81, 43, 44, 45, 46, 47, 48, 49, - /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 189, 80, - /* 100 */ 189, 101, 102, 103, 104, 105, 106, 107, 108, 109, - /* 110 */ 110, 111, 234, 235, 234, 235, 305, 306, 305, 118, - /* 120 */ 307, 305, 306, 297, 298, 247, 86, 247, 88, 19, - /* 130 */ 259, 251, 252, 267, 268, 269, 26, 136, 137, 261, - /* 140 */ 121, 101, 102, 103, 104, 105, 106, 107, 108, 109, - /* 150 */ 110, 111, 59, 43, 44, 45, 46, 47, 48, 49, - /* 160 */ 50, 51, 52, 53, 54, 55, 56, 57, 259, 291, - /* 170 */ 105, 106, 107, 108, 109, 110, 111, 158, 189, 69, - /* 180 */ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, - /* 190 */ 111, 107, 108, 109, 110, 111, 205, 206, 207, 19, - /* 200 */ 19, 54, 55, 56, 57, 58, 29, 114, 115, 116, - /* 210 */ 33, 101, 102, 103, 104, 105, 106, 107, 108, 109, - /* 220 */ 110, 111, 233, 43, 44, 45, 46, 47, 48, 49, - /* 230 */ 50, 51, 52, 53, 54, 55, 56, 57, 19, 126, - /* 240 */ 127, 148, 65, 24, 214, 200, 59, 67, 101, 102, - /* 250 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 22, - /* 260 */ 189, 111, 43, 44, 45, 46, 47, 48, 49, 50, - /* 270 */ 51, 52, 53, 54, 55, 56, 57, 206, 207, 234, - /* 280 */ 235, 101, 102, 103, 104, 105, 106, 107, 108, 109, - /* 290 */ 110, 111, 247, 76, 107, 114, 59, 267, 268, 269, - /* 300 */ 189, 114, 115, 116, 162, 163, 89, 19, 263, 92, - /* 310 */ 189, 23, 54, 55, 56, 57, 189, 206, 207, 22, - /* 320 */ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, - /* 330 */ 111, 43, 44, 45, 46, 47, 48, 49, 50, 51, - /* 340 */ 52, 53, 54, 55, 56, 57, 19, 189, 277, 59, - /* 350 */ 23, 114, 115, 116, 46, 47, 48, 49, 61, 101, - /* 360 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, - /* 370 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, - /* 380 */ 53, 54, 55, 56, 57, 125, 126, 127, 277, 101, - /* 390 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, - /* 400 */ 59, 189, 189, 276, 114, 115, 116, 117, 73, 59, - /* 410 */ 120, 121, 122, 72, 214, 19, 81, 259, 19, 23, - /* 420 */ 130, 81, 72, 24, 211, 212, 221, 119, 101, 102, - /* 430 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 43, + /* 0 */ 191, 220, 191, 222, 191, 191, 271, 272, 273, 216, + /* 10 */ 191, 230, 216, 191, 191, 191, 271, 272, 273, 19, + /* 20 */ 232, 233, 213, 214, 213, 214, 202, 292, 202, 232, + /* 30 */ 233, 31, 213, 214, 213, 213, 214, 213, 214, 39, + /* 40 */ 207, 208, 209, 43, 44, 45, 46, 47, 48, 49, + /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 
235, 19, + /* 60 */ 236, 237, 236, 237, 271, 272, 273, 271, 272, 273, + /* 70 */ 191, 210, 250, 249, 250, 249, 213, 191, 199, 253, + /* 80 */ 254, 259, 203, 43, 44, 45, 46, 47, 48, 49, + /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 191, 213, + /* 100 */ 214, 213, 102, 103, 104, 105, 106, 107, 108, 109, + /* 110 */ 110, 111, 112, 59, 228, 301, 293, 81, 305, 306, + /* 120 */ 311, 312, 311, 310, 313, 59, 86, 212, 88, 19, + /* 130 */ 311, 312, 271, 272, 273, 220, 26, 112, 54, 55, + /* 140 */ 56, 57, 102, 103, 104, 105, 106, 107, 108, 109, + /* 150 */ 110, 111, 112, 43, 44, 45, 46, 47, 48, 49, + /* 160 */ 50, 51, 52, 53, 54, 55, 56, 57, 191, 115, + /* 170 */ 116, 117, 118, 137, 138, 121, 122, 123, 191, 69, + /* 180 */ 203, 115, 116, 117, 59, 131, 102, 103, 104, 105, + /* 190 */ 106, 107, 108, 109, 110, 111, 112, 72, 191, 19, + /* 200 */ 54, 55, 56, 57, 58, 108, 109, 110, 111, 112, + /* 210 */ 303, 304, 102, 103, 104, 105, 106, 107, 108, 109, + /* 220 */ 110, 111, 112, 43, 44, 45, 46, 47, 48, 49, + /* 230 */ 50, 51, 52, 53, 54, 55, 56, 57, 19, 16, + /* 240 */ 115, 116, 117, 24, 16, 227, 202, 67, 102, 103, + /* 250 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 59, + /* 260 */ 26, 191, 43, 44, 45, 46, 47, 48, 49, 50, + /* 270 */ 51, 52, 53, 54, 55, 56, 57, 24, 208, 209, + /* 280 */ 236, 237, 102, 103, 104, 105, 106, 107, 108, 109, + /* 290 */ 110, 111, 112, 249, 183, 184, 185, 186, 187, 188, + /* 300 */ 77, 59, 79, 191, 193, 77, 195, 79, 19, 19, + /* 310 */ 266, 304, 59, 202, 24, 115, 116, 117, 191, 127, + /* 320 */ 128, 102, 103, 104, 105, 106, 107, 108, 109, 110, + /* 330 */ 111, 112, 43, 44, 45, 46, 47, 48, 49, 50, + /* 340 */ 51, 52, 53, 54, 55, 56, 57, 236, 237, 191, + /* 350 */ 150, 281, 191, 185, 186, 187, 188, 115, 116, 117, + /* 360 */ 249, 193, 191, 195, 26, 73, 59, 191, 114, 116, + /* 370 */ 202, 213, 214, 81, 263, 106, 107, 108, 109, 110, + /* 380 */ 111, 112, 148, 160, 142, 95, 228, 191, 191, 213, + /* 390 */ 214, 102, 103, 104, 105, 106, 107, 108, 109, 110, + /* 400 */ 111, 112, 112, 149, 236, 237, 295, 100, 118, 119, + /* 410 */ 120, 121, 122, 123, 124, 19, 31, 249, 126, 23, + /* 420 */ 130, 260, 115, 116, 39, 22, 250, 120, 191, 137, + /* 430 */ 138, 263, 305, 306, 238, 259, 265, 310, 149, 43, /* 440 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - /* 450 */ 54, 55, 56, 57, 19, 114, 115, 116, 23, 208, - /* 460 */ 125, 248, 189, 189, 114, 115, 116, 267, 268, 269, - /* 470 */ 189, 136, 137, 189, 262, 22, 136, 137, 43, 44, - /* 480 */ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, - /* 490 */ 55, 56, 57, 189, 95, 211, 212, 101, 102, 103, - /* 500 */ 104, 105, 106, 107, 108, 109, 110, 111, 59, 189, - /* 510 */ 111, 189, 59, 76, 294, 295, 117, 118, 119, 120, - /* 520 */ 121, 122, 123, 19, 87, 189, 89, 23, 129, 92, - /* 530 */ 279, 227, 248, 22, 189, 284, 101, 102, 103, 104, - /* 540 */ 105, 106, 107, 108, 109, 110, 111, 43, 44, 45, - /* 550 */ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, - /* 560 */ 56, 57, 19, 114, 115, 116, 23, 114, 115, 116, - /* 570 */ 59, 117, 299, 300, 120, 121, 122, 304, 189, 189, - /* 580 */ 143, 189, 110, 111, 130, 22, 43, 44, 45, 46, - /* 590 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 600 */ 57, 211, 212, 211, 212, 101, 102, 103, 104, 105, - /* 610 */ 106, 107, 108, 109, 110, 111, 226, 189, 226, 189, - /* 620 */ 298, 132, 59, 134, 135, 114, 115, 116, 189, 59, - /* 630 */ 285, 19, 7, 8, 9, 23, 205, 206, 207, 211, - /* 640 */ 212, 211, 212, 221, 101, 102, 103, 104, 105, 106, - /* 650 */ 107, 108, 109, 110, 111, 43, 44, 45, 46, 47, - /* 660 */ 48, 49, 50, 51, 52, 53, 54, 55, 
56, 57, - /* 670 */ 19, 181, 182, 183, 184, 185, 186, 114, 115, 116, - /* 680 */ 189, 191, 133, 193, 114, 115, 116, 138, 299, 300, - /* 690 */ 200, 22, 201, 304, 43, 44, 45, 46, 47, 48, - /* 700 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 35, - /* 710 */ 189, 141, 189, 101, 102, 103, 104, 105, 106, 107, - /* 720 */ 108, 109, 110, 111, 234, 235, 22, 23, 59, 184, - /* 730 */ 26, 186, 211, 212, 211, 212, 191, 247, 193, 19, - /* 740 */ 66, 105, 106, 73, 189, 200, 189, 226, 74, 226, - /* 750 */ 22, 261, 101, 102, 103, 104, 105, 106, 107, 108, - /* 760 */ 109, 110, 111, 43, 44, 45, 46, 47, 48, 49, - /* 770 */ 50, 51, 52, 53, 54, 55, 56, 57, 189, 234, - /* 780 */ 235, 291, 19, 114, 115, 116, 150, 59, 152, 189, - /* 790 */ 233, 236, 247, 59, 189, 125, 126, 127, 59, 300, - /* 800 */ 211, 212, 128, 304, 100, 19, 261, 156, 45, 46, - /* 810 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 820 */ 57, 101, 102, 103, 104, 105, 106, 107, 108, 109, - /* 830 */ 110, 111, 46, 233, 189, 189, 291, 248, 99, 189, - /* 840 */ 125, 126, 127, 115, 26, 200, 289, 230, 231, 115, - /* 850 */ 200, 16, 189, 114, 115, 189, 211, 212, 119, 221, - /* 860 */ 189, 211, 212, 258, 101, 102, 103, 104, 105, 106, - /* 870 */ 107, 108, 109, 110, 111, 189, 156, 211, 212, 234, - /* 880 */ 235, 189, 211, 212, 234, 235, 22, 201, 189, 150, - /* 890 */ 151, 152, 247, 248, 76, 16, 19, 247, 248, 113, - /* 900 */ 189, 24, 257, 211, 212, 189, 26, 89, 262, 223, - /* 910 */ 92, 225, 77, 189, 79, 129, 19, 53, 226, 248, - /* 920 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, - /* 930 */ 53, 54, 55, 56, 57, 236, 19, 271, 189, 99, - /* 940 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, - /* 950 */ 53, 54, 55, 56, 57, 115, 77, 59, 79, 119, - /* 960 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, - /* 970 */ 53, 54, 55, 56, 57, 259, 22, 23, 101, 102, - /* 980 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 59, - /* 990 */ 150, 151, 152, 158, 22, 244, 24, 246, 101, 102, - /* 1000 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 285, - /* 1010 */ 189, 189, 114, 115, 116, 200, 136, 137, 101, 102, - /* 1020 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 230, - /* 1030 */ 231, 59, 211, 212, 285, 105, 106, 189, 19, 141, - /* 1040 */ 234, 235, 239, 113, 114, 115, 116, 226, 118, 234, - /* 1050 */ 235, 189, 249, 247, 100, 189, 126, 23, 236, 107, - /* 1060 */ 26, 189, 247, 44, 45, 46, 47, 48, 49, 50, - /* 1070 */ 51, 52, 53, 54, 55, 56, 57, 211, 212, 59, - /* 1080 */ 150, 233, 152, 211, 212, 133, 12, 115, 189, 189, - /* 1090 */ 138, 19, 20, 300, 22, 233, 76, 304, 226, 11, - /* 1100 */ 208, 27, 22, 23, 200, 19, 26, 87, 36, 89, - /* 1110 */ 211, 212, 92, 300, 248, 189, 42, 304, 189, 250, - /* 1120 */ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, - /* 1130 */ 111, 59, 200, 233, 114, 115, 116, 63, 234, 235, - /* 1140 */ 235, 19, 20, 71, 22, 300, 189, 73, 200, 304, - /* 1150 */ 116, 247, 247, 81, 189, 200, 227, 26, 36, 234, - /* 1160 */ 235, 203, 204, 143, 200, 26, 234, 235, 194, 200, - /* 1170 */ 48, 99, 247, 66, 189, 141, 284, 105, 106, 247, - /* 1180 */ 100, 59, 234, 235, 112, 259, 114, 115, 116, 234, - /* 1190 */ 235, 119, 85, 71, 266, 247, 211, 212, 234, 235, - /* 1200 */ 114, 94, 247, 234, 235, 12, 266, 85, 136, 137, - /* 1210 */ 189, 247, 90, 26, 126, 127, 247, 189, 26, 22, - /* 1220 */ 27, 99, 150, 151, 152, 153, 154, 105, 106, 189, - /* 1230 */ 302, 303, 211, 212, 112, 42, 114, 115, 116, 211, - /* 1240 */ 212, 119, 302, 303, 19, 20, 189, 22, 274, 189, - /* 1250 */ 15, 144, 278, 189, 22, 23, 63, 189, 189, 203, - /* 1260 */ 204, 36, 136, 137, 155, 24, 157, 143, 211, 212, - 
/* 1270 */ 189, 140, 150, 151, 152, 153, 154, 0, 1, 2, - /* 1280 */ 211, 212, 5, 46, 59, 161, 147, 10, 11, 12, - /* 1290 */ 13, 14, 211, 212, 17, 60, 71, 189, 258, 189, - /* 1300 */ 59, 189, 105, 106, 189, 189, 189, 30, 116, 32, - /* 1310 */ 85, 124, 189, 251, 252, 90, 189, 40, 258, 211, - /* 1320 */ 212, 211, 212, 189, 99, 26, 211, 212, 211, 212, - /* 1330 */ 105, 106, 100, 141, 211, 212, 119, 112, 189, 114, - /* 1340 */ 115, 116, 23, 189, 119, 26, 129, 70, 189, 31, - /* 1350 */ 113, 19, 20, 24, 22, 78, 115, 39, 81, 189, - /* 1360 */ 211, 212, 26, 189, 22, 211, 212, 189, 36, 189, - /* 1370 */ 211, 212, 189, 189, 97, 150, 151, 152, 153, 154, - /* 1380 */ 127, 211, 212, 189, 189, 211, 212, 189, 189, 211, - /* 1390 */ 212, 59, 189, 189, 211, 212, 23, 189, 22, 26, - /* 1400 */ 24, 189, 149, 71, 189, 211, 212, 189, 131, 211, - /* 1410 */ 212, 189, 189, 136, 137, 211, 212, 85, 189, 211, - /* 1420 */ 212, 59, 90, 211, 212, 292, 293, 118, 119, 211, - /* 1430 */ 212, 99, 23, 211, 212, 26, 159, 105, 106, 189, - /* 1440 */ 211, 212, 143, 150, 112, 152, 114, 115, 116, 1, - /* 1450 */ 2, 119, 23, 5, 23, 26, 189, 26, 10, 11, - /* 1460 */ 12, 13, 14, 83, 84, 17, 253, 189, 139, 189, - /* 1470 */ 19, 20, 189, 22, 189, 189, 140, 115, 30, 59, - /* 1480 */ 32, 139, 150, 151, 152, 153, 154, 36, 40, 211, - /* 1490 */ 212, 211, 212, 59, 211, 212, 211, 212, 7, 8, - /* 1500 */ 19, 20, 189, 22, 150, 189, 152, 231, 281, 189, - /* 1510 */ 59, 189, 23, 189, 189, 26, 189, 36, 70, 189, - /* 1520 */ 23, 237, 71, 26, 211, 212, 78, 211, 212, 81, - /* 1530 */ 189, 211, 212, 211, 212, 115, 211, 212, 211, 212, - /* 1540 */ 59, 211, 212, 23, 23, 97, 26, 26, 23, 115, - /* 1550 */ 99, 26, 71, 189, 189, 189, 105, 106, 107, 23, - /* 1560 */ 189, 23, 26, 112, 26, 114, 115, 116, 189, 309, - /* 1570 */ 119, 23, 19, 20, 26, 22, 189, 211, 212, 131, - /* 1580 */ 99, 189, 211, 212, 136, 137, 105, 106, 189, 36, - /* 1590 */ 211, 212, 189, 112, 189, 114, 115, 116, 211, 212, - /* 1600 */ 119, 150, 151, 152, 153, 154, 189, 159, 23, 250, - /* 1610 */ 189, 26, 59, 189, 189, 189, 189, 189, 280, 189, - /* 1620 */ 250, 189, 189, 238, 71, 189, 189, 250, 211, 212, - /* 1630 */ 187, 150, 151, 152, 153, 154, 211, 212, 250, 290, - /* 1640 */ 240, 211, 212, 211, 212, 254, 286, 209, 254, 241, - /* 1650 */ 240, 254, 99, 286, 215, 220, 214, 244, 105, 106, - /* 1660 */ 214, 214, 244, 273, 224, 112, 192, 114, 115, 116, - /* 1670 */ 60, 290, 119, 5, 139, 196, 196, 38, 10, 11, - /* 1680 */ 12, 13, 14, 238, 240, 17, 196, 148, 287, 287, - /* 1690 */ 276, 113, 22, 147, 241, 43, 229, 241, 30, 18, - /* 1700 */ 32, 232, 232, 150, 151, 152, 153, 154, 40, 232, - /* 1710 */ 232, 196, 18, 195, 265, 265, 264, 241, 264, 196, - /* 1720 */ 155, 229, 229, 241, 241, 241, 195, 62, 196, 195, - /* 1730 */ 22, 113, 216, 196, 222, 195, 195, 282, 70, 196, - /* 1740 */ 283, 213, 216, 213, 64, 22, 78, 124, 219, 81, - /* 1750 */ 162, 111, 219, 142, 256, 213, 113, 255, 213, 256, - /* 1760 */ 216, 303, 215, 213, 213, 97, 255, 213, 216, 275, - /* 1770 */ 275, 222, 216, 256, 255, 196, 91, 82, 256, 255, - /* 1780 */ 308, 308, 146, 22, 143, 196, 155, 260, 25, 145, - /* 1790 */ 144, 199, 26, 198, 13, 190, 190, 6, 293, 131, - /* 1800 */ 188, 188, 245, 244, 136, 137, 245, 243, 242, 241, - /* 1810 */ 188, 202, 208, 217, 217, 260, 208, 4, 202, 3, - /* 1820 */ 22, 202, 208, 208, 160, 15, 209, 159, 270, 209, - /* 1830 */ 98, 16, 272, 208, 23, 23, 137, 148, 128, 20, - /* 1840 */ 140, 24, 16, 142, 1, 140, 149, 128, 61, 53, - /* 1850 */ 148, 37, 53, 53, 53, 128, 114, 34, 296, 296, - /* 1860 */ 139, 
1, 5, 22, 113, 158, 26, 75, 41, 139, - /* 1870 */ 68, 68, 113, 24, 20, 19, 129, 123, 23, 96, - /* 1880 */ 22, 22, 37, 22, 22, 67, 22, 67, 59, 24, - /* 1890 */ 23, 28, 67, 147, 22, 26, 23, 23, 23, 23, - /* 1900 */ 22, 24, 23, 22, 24, 23, 139, 23, 114, 22, - /* 1910 */ 141, 26, 88, 75, 86, 44, 23, 34, 22, 75, - /* 1920 */ 34, 24, 34, 34, 34, 93, 34, 26, 26, 34, - /* 1930 */ 23, 23, 23, 23, 23, 11, 23, 22, 26, 22, - /* 1940 */ 22, 133, 23, 23, 22, 22, 139, 26, 139, 23, - /* 1950 */ 15, 1, 1, 310, 310, 310, 310, 310, 310, 310, - /* 1960 */ 139, 139, 310, 310, 310, 310, 310, 310, 310, 310, - /* 1970 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 1980 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 1990 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2000 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2010 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2020 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2030 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2040 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2050 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2060 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2070 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2080 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2090 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2100 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2110 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2120 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2130 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2140 */ 310, 310, 310, + /* 450 */ 54, 55, 56, 57, 191, 117, 191, 210, 19, 152, + /* 460 */ 153, 154, 23, 295, 102, 103, 104, 105, 106, 107, + /* 470 */ 108, 109, 110, 111, 112, 266, 213, 214, 213, 214, + /* 480 */ 142, 81, 43, 44, 45, 46, 47, 48, 49, 50, + /* 490 */ 51, 52, 53, 54, 55, 56, 57, 301, 102, 103, + /* 500 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 106, + /* 510 */ 107, 118, 59, 250, 121, 122, 123, 280, 76, 119, + /* 520 */ 236, 237, 259, 306, 131, 72, 59, 310, 19, 87, + /* 530 */ 283, 89, 23, 249, 92, 288, 22, 137, 138, 22, + /* 540 */ 275, 102, 103, 104, 105, 106, 107, 108, 109, 110, + /* 550 */ 111, 112, 43, 44, 45, 46, 47, 48, 49, 50, + /* 560 */ 51, 52, 53, 54, 55, 56, 57, 19, 115, 116, + /* 570 */ 117, 23, 186, 59, 188, 108, 59, 241, 191, 193, + /* 580 */ 26, 195, 115, 116, 117, 191, 144, 251, 202, 22, + /* 590 */ 100, 43, 44, 45, 46, 47, 48, 49, 50, 51, + /* 600 */ 52, 53, 54, 55, 56, 57, 116, 213, 214, 191, + /* 610 */ 120, 102, 103, 104, 105, 106, 107, 108, 109, 110, + /* 620 */ 111, 112, 236, 237, 306, 238, 59, 26, 310, 115, + /* 630 */ 116, 117, 115, 116, 117, 249, 246, 19, 248, 106, + /* 640 */ 107, 23, 152, 153, 154, 46, 47, 48, 49, 263, + /* 650 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + /* 660 */ 112, 43, 44, 45, 46, 47, 48, 49, 50, 51, + /* 670 */ 52, 53, 54, 55, 56, 57, 19, 76, 298, 299, + /* 680 */ 23, 295, 115, 116, 117, 152, 191, 154, 301, 73, + /* 690 */ 89, 137, 138, 92, 22, 191, 144, 22, 191, 191, + /* 700 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + /* 710 */ 53, 54, 55, 56, 57, 163, 191, 213, 214, 120, + /* 720 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + /* 730 */ 112, 59, 228, 191, 59, 191, 236, 237, 213, 214, + /* 740 */ 11, 59, 126, 127, 128, 238, 19, 26, 191, 249, + /* 750 */ 23, 164, 165, 228, 191, 213, 214, 213, 214, 102, + /* 760 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + 
/* 770 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + /* 780 */ 53, 54, 55, 56, 57, 19, 241, 115, 116, 117, + /* 790 */ 115, 116, 117, 191, 250, 238, 251, 115, 116, 117, + /* 800 */ 157, 23, 159, 191, 26, 191, 111, 112, 301, 43, + /* 810 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + /* 820 */ 54, 55, 56, 57, 142, 213, 214, 213, 214, 102, + /* 830 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + /* 840 */ 228, 191, 228, 191, 191, 207, 208, 209, 126, 127, + /* 850 */ 128, 133, 289, 135, 136, 19, 127, 128, 301, 7, + /* 860 */ 8, 9, 141, 213, 214, 213, 214, 265, 102, 103, + /* 870 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 43, + /* 880 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + /* 890 */ 54, 55, 56, 57, 191, 117, 191, 22, 23, 19, + /* 900 */ 250, 26, 250, 191, 223, 191, 126, 127, 128, 205, + /* 910 */ 206, 205, 206, 260, 21, 202, 213, 214, 213, 214, + /* 920 */ 142, 270, 208, 209, 158, 45, 46, 47, 48, 49, + /* 930 */ 50, 51, 52, 53, 54, 55, 56, 57, 102, 103, + /* 940 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 236, + /* 950 */ 237, 12, 191, 250, 76, 250, 191, 22, 23, 308, + /* 960 */ 309, 26, 249, 202, 191, 191, 27, 89, 191, 202, + /* 970 */ 92, 202, 260, 80, 213, 214, 101, 203, 213, 214, + /* 980 */ 22, 42, 102, 103, 104, 105, 106, 107, 108, 109, + /* 990 */ 110, 111, 112, 228, 158, 281, 108, 236, 237, 225, + /* 1000 */ 191, 227, 63, 236, 237, 236, 237, 191, 235, 191, + /* 1010 */ 249, 250, 73, 241, 19, 122, 249, 59, 249, 24, + /* 1020 */ 259, 29, 134, 251, 191, 33, 22, 139, 24, 213, + /* 1030 */ 214, 213, 214, 191, 19, 210, 101, 191, 43, 44, + /* 1040 */ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + /* 1050 */ 55, 56, 57, 160, 19, 213, 214, 65, 43, 44, + /* 1060 */ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + /* 1070 */ 55, 56, 57, 191, 116, 22, 191, 24, 43, 44, + /* 1080 */ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + /* 1090 */ 55, 56, 57, 191, 261, 213, 214, 102, 103, 104, + /* 1100 */ 105, 106, 107, 108, 109, 110, 111, 112, 59, 19, + /* 1110 */ 191, 265, 59, 288, 191, 213, 214, 102, 103, 104, + /* 1120 */ 105, 106, 107, 108, 109, 110, 111, 112, 35, 191, + /* 1130 */ 66, 191, 213, 214, 191, 270, 46, 102, 103, 104, + /* 1140 */ 105, 106, 107, 108, 109, 110, 111, 112, 191, 85, + /* 1150 */ 265, 213, 214, 213, 214, 106, 107, 19, 94, 66, + /* 1160 */ 137, 138, 191, 114, 115, 116, 117, 74, 119, 116, + /* 1170 */ 213, 214, 202, 308, 309, 306, 127, 191, 235, 310, + /* 1180 */ 59, 191, 44, 45, 46, 47, 48, 49, 50, 51, + /* 1190 */ 52, 53, 54, 55, 56, 57, 191, 76, 196, 213, + /* 1200 */ 214, 152, 12, 154, 114, 191, 236, 237, 87, 145, + /* 1210 */ 89, 19, 20, 92, 22, 22, 23, 27, 191, 249, + /* 1220 */ 130, 202, 129, 202, 191, 235, 306, 152, 36, 154, + /* 1230 */ 310, 191, 42, 191, 191, 191, 115, 116, 117, 191, + /* 1240 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + /* 1250 */ 112, 59, 99, 63, 191, 236, 237, 236, 237, 191, + /* 1260 */ 289, 213, 214, 71, 191, 144, 261, 191, 249, 191, + /* 1270 */ 249, 19, 20, 81, 22, 235, 213, 214, 235, 191, + /* 1280 */ 278, 213, 214, 191, 282, 132, 213, 214, 36, 213, + /* 1290 */ 214, 191, 100, 191, 101, 191, 191, 191, 106, 107, + /* 1300 */ 48, 213, 214, 261, 191, 113, 191, 115, 116, 117, + /* 1310 */ 191, 59, 120, 213, 214, 213, 214, 213, 214, 213, + /* 1320 */ 214, 22, 23, 71, 237, 59, 213, 214, 191, 137, + /* 1330 */ 138, 191, 213, 214, 191, 191, 249, 85, 191, 261, + /* 1340 */ 191, 26, 90, 15, 152, 153, 154, 155, 156, 19, + /* 1350 */ 213, 214, 100, 213, 214, 191, 213, 214, 106, 107, + /* 1360 */ 213, 214, 213, 214, 191, 113, 261, 115, 116, 117, + 
/* 1370 */ 253, 254, 120, 229, 191, 191, 191, 213, 214, 191, + /* 1380 */ 306, 289, 116, 26, 310, 191, 213, 214, 60, 19, + /* 1390 */ 296, 297, 24, 191, 46, 191, 213, 214, 213, 214, + /* 1400 */ 101, 213, 214, 191, 152, 153, 154, 155, 156, 0, + /* 1410 */ 1, 2, 191, 229, 5, 213, 214, 213, 214, 10, + /* 1420 */ 11, 12, 13, 14, 1, 2, 17, 191, 5, 19, + /* 1430 */ 20, 191, 22, 10, 11, 12, 13, 14, 191, 30, + /* 1440 */ 17, 32, 241, 148, 149, 115, 36, 191, 241, 40, + /* 1450 */ 191, 26, 251, 30, 191, 32, 141, 22, 251, 5, + /* 1460 */ 213, 214, 114, 40, 10, 11, 12, 13, 14, 59, + /* 1470 */ 134, 17, 213, 214, 191, 139, 213, 214, 191, 70, + /* 1480 */ 191, 71, 125, 191, 30, 115, 32, 78, 53, 191, + /* 1490 */ 81, 191, 191, 70, 40, 85, 213, 214, 191, 22, + /* 1500 */ 90, 78, 213, 214, 81, 213, 214, 98, 140, 120, + /* 1510 */ 100, 213, 214, 213, 214, 23, 106, 107, 26, 130, + /* 1520 */ 191, 98, 191, 113, 70, 115, 116, 117, 23, 22, + /* 1530 */ 120, 26, 78, 19, 191, 81, 19, 20, 61, 22, + /* 1540 */ 191, 132, 213, 214, 213, 214, 137, 138, 59, 191, + /* 1550 */ 191, 191, 98, 36, 128, 132, 213, 214, 128, 59, + /* 1560 */ 137, 138, 152, 153, 154, 155, 156, 83, 84, 144, + /* 1570 */ 161, 213, 214, 213, 214, 23, 59, 151, 26, 23, + /* 1580 */ 23, 151, 26, 26, 161, 59, 132, 23, 71, 191, + /* 1590 */ 26, 137, 138, 119, 120, 23, 19, 20, 26, 22, + /* 1600 */ 223, 23, 85, 23, 26, 116, 26, 90, 191, 7, + /* 1610 */ 8, 97, 152, 36, 154, 161, 116, 100, 23, 23, + /* 1620 */ 191, 26, 26, 106, 107, 191, 23, 223, 23, 26, + /* 1630 */ 113, 26, 115, 116, 117, 23, 59, 120, 26, 191, + /* 1640 */ 191, 191, 116, 255, 191, 252, 191, 140, 71, 191, + /* 1650 */ 315, 233, 191, 191, 191, 191, 191, 191, 191, 191, + /* 1660 */ 191, 285, 284, 239, 252, 252, 252, 252, 240, 152, + /* 1670 */ 153, 154, 155, 156, 189, 294, 268, 100, 242, 268, + /* 1680 */ 264, 211, 290, 106, 107, 108, 264, 256, 256, 243, + /* 1690 */ 113, 290, 115, 116, 117, 268, 217, 120, 226, 243, + /* 1700 */ 222, 268, 216, 19, 20, 246, 22, 216, 256, 216, + /* 1710 */ 194, 60, 38, 242, 277, 294, 240, 242, 198, 246, + /* 1720 */ 36, 140, 198, 198, 19, 20, 150, 22, 149, 152, + /* 1730 */ 153, 154, 155, 156, 294, 291, 291, 280, 22, 43, + /* 1740 */ 231, 36, 18, 59, 234, 234, 234, 234, 267, 269, + /* 1750 */ 18, 198, 197, 231, 148, 71, 269, 269, 243, 231, + /* 1760 */ 198, 243, 267, 197, 59, 157, 243, 198, 62, 287, + /* 1770 */ 243, 197, 22, 198, 114, 64, 71, 218, 218, 197, + /* 1780 */ 286, 198, 197, 215, 100, 215, 215, 224, 22, 125, + /* 1790 */ 106, 107, 164, 24, 221, 112, 143, 113, 302, 115, + /* 1800 */ 116, 117, 218, 215, 120, 100, 217, 221, 215, 215, + /* 1810 */ 309, 106, 107, 215, 224, 279, 279, 218, 113, 258, + /* 1820 */ 115, 116, 117, 114, 257, 120, 91, 198, 82, 147, + /* 1830 */ 144, 22, 274, 314, 198, 314, 152, 153, 154, 155, + /* 1840 */ 156, 276, 157, 146, 145, 258, 25, 247, 257, 201, + /* 1850 */ 258, 26, 200, 13, 257, 244, 246, 152, 153, 154, + /* 1860 */ 155, 156, 258, 262, 257, 247, 245, 243, 192, 192, + /* 1870 */ 6, 262, 204, 210, 219, 210, 210, 190, 190, 190, + /* 1880 */ 210, 219, 204, 211, 211, 210, 4, 3, 22, 162, + /* 1890 */ 15, 23, 16, 23, 204, 138, 129, 150, 26, 141, + /* 1900 */ 20, 24, 143, 16, 1, 141, 129, 129, 61, 53, + /* 1910 */ 37, 150, 297, 300, 300, 53, 53, 129, 53, 115, + /* 1920 */ 34, 140, 1, 5, 22, 114, 68, 26, 160, 68, + /* 1930 */ 75, 41, 140, 114, 24, 20, 19, 130, 124, 23, + /* 1940 */ 96, 22, 22, 37, 22, 67, 22, 59, 67, 24, + /* 1950 */ 22, 28, 67, 23, 148, 22, 97, 23, 23, 23, + /* 1960 */ 140, 23, 22, 26, 23, 23, 115, 22, 142, 
26, + /* 1970 */ 75, 88, 75, 34, 23, 86, 44, 22, 34, 26, + /* 1980 */ 34, 34, 34, 34, 93, 24, 26, 23, 34, 23, + /* 1990 */ 23, 23, 23, 11, 23, 22, 26, 22, 22, 15, + /* 2000 */ 23, 23, 22, 22, 1, 26, 23, 140, 134, 140, + /* 2010 */ 1, 140, 316, 316, 316, 316, 316, 316, 316, 140, + /* 2020 */ 316, 316, 316, 316, 316, 316, 316, 316, 316, 316, + /* 2030 */ 316, 316, 316, 316, 316, 316, 316, 316, 316, 316, + /* 2040 */ 316, 316, 316, 316, 316, 316, 316, 316, 316, 316, + /* 2050 */ 316, 316, 316, 316, 316, 316, 316, 316, 316, 316, + /* 2060 */ 316, 316, 316, 316, 316, 316, 316, 316, 316, 316, + /* 2070 */ 316, 316, 316, 316, 316, 316, 316, 316, 316, 316, + /* 2080 */ 316, 316, 316, 316, 316, 316, 316, 316, 316, 316, + /* 2090 */ 316, 316, 316, 316, 316, 316, 316, 316, 316, 316, + /* 2100 */ 316, 316, 316, 316, 316, 316, 316, 316, 316, 316, + /* 2110 */ 316, 316, 316, 316, 316, 316, 316, 316, 316, 316, + /* 2120 */ 316, 316, 316, 316, 316, 316, 316, 316, 316, 316, + /* 2130 */ 316, 316, 316, 316, 316, 316, 316, 316, 316, 316, + /* 2140 */ 316, 316, 316, 316, 316, 316, 316, 316, 316, 316, + /* 2150 */ 316, 316, 316, 316, 316, 316, 316, 316, 316, 316, + /* 2160 */ 316, 316, 316, 316, 316, 316, 316, 316, 316, 316, + /* 2170 */ 316, 316, 316, 316, 316, 316, 316, 316, 316, 316, + /* 2180 */ 316, 316, 316, 316, 316, 316, 316, 316, 316, 316, + /* 2190 */ 316, 316, 316, 316, 316, 316, 316, 316, 316, 316, + /* 2200 */ 316, 316, 316, }; -#define YY_SHIFT_COUNT (552) +#define YY_SHIFT_COUNT (569) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (1951) +#define YY_SHIFT_MAX (2009) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 1448, 1277, 1668, 1072, 1072, 340, 1122, 1225, 1332, 1481, - /* 10 */ 1481, 1481, 335, 0, 0, 180, 897, 1481, 1481, 1481, - /* 20 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, - /* 30 */ 930, 930, 1020, 1020, 290, 1, 340, 340, 340, 340, - /* 40 */ 340, 340, 40, 110, 219, 288, 327, 396, 435, 504, - /* 50 */ 543, 612, 651, 720, 877, 897, 897, 897, 897, 897, - /* 60 */ 897, 897, 897, 897, 897, 897, 897, 897, 897, 897, - /* 70 */ 897, 897, 897, 917, 897, 1019, 763, 763, 1451, 1481, - /* 80 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, - /* 90 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, - /* 100 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, - /* 110 */ 1481, 1481, 1553, 1481, 1481, 1481, 1481, 1481, 1481, 1481, - /* 120 */ 1481, 1481, 1481, 1481, 1481, 1481, 147, 258, 258, 258, - /* 130 */ 258, 258, 79, 65, 84, 449, 19, 786, 449, 636, - /* 140 */ 636, 449, 880, 880, 880, 880, 113, 142, 142, 472, - /* 150 */ 150, 1962, 1962, 399, 399, 399, 93, 237, 341, 237, - /* 160 */ 237, 1074, 1074, 437, 350, 704, 1080, 449, 449, 449, - /* 170 */ 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, - /* 180 */ 449, 449, 449, 449, 449, 449, 449, 449, 818, 818, - /* 190 */ 449, 1088, 217, 217, 734, 734, 1124, 1126, 1962, 1962, - /* 200 */ 1962, 739, 840, 840, 453, 454, 511, 187, 563, 570, - /* 210 */ 898, 669, 449, 449, 449, 449, 449, 449, 449, 449, - /* 220 */ 449, 670, 449, 449, 449, 449, 449, 449, 449, 449, - /* 230 */ 449, 449, 449, 449, 674, 674, 674, 449, 449, 449, - /* 240 */ 449, 1034, 449, 449, 449, 972, 1107, 449, 449, 1193, - /* 250 */ 449, 449, 449, 449, 449, 449, 449, 449, 260, 177, - /* 260 */ 489, 1241, 1241, 1241, 1241, 1192, 489, 489, 952, 1197, - /* 270 */ 625, 1235, 1131, 181, 181, 1086, 1139, 1131, 1086, 1187, - /* 280 */ 1319, 1237, 1318, 1318, 1318, 181, 1299, 1299, 1109, 1336, - /* 290 */ 549, 1376, 1610, 
1535, 1535, 1639, 1639, 1535, 1539, 1578, - /* 300 */ 1670, 1546, 1652, 1546, 1681, 1681, 1681, 1681, 1535, 1694, - /* 310 */ 1546, 1546, 1578, 1670, 1652, 1546, 1652, 1546, 1535, 1694, - /* 320 */ 1565, 1665, 1535, 1694, 1708, 1535, 1694, 1535, 1694, 1708, - /* 330 */ 1618, 1618, 1618, 1680, 1723, 1723, 1708, 1618, 1623, 1618, - /* 340 */ 1680, 1618, 1618, 1588, 1708, 1640, 1640, 1708, 1611, 1643, - /* 350 */ 1611, 1643, 1611, 1643, 1611, 1643, 1535, 1685, 1685, 1695, - /* 360 */ 1695, 1636, 1641, 1761, 1535, 1631, 1636, 1644, 1646, 1546, - /* 370 */ 1763, 1766, 1781, 1781, 1791, 1791, 1791, 1962, 1962, 1962, - /* 380 */ 1962, 1962, 1962, 1962, 1962, 1962, 1962, 1962, 1962, 1962, - /* 390 */ 1962, 1962, 308, 835, 954, 1232, 879, 715, 728, 1373, - /* 400 */ 864, 1329, 1253, 1409, 297, 1431, 1489, 1497, 1520, 1521, - /* 410 */ 1525, 1362, 1309, 1491, 1217, 1420, 1429, 1536, 1380, 1538, - /* 420 */ 1293, 1354, 1548, 1585, 1434, 1342, 1813, 1816, 1798, 1664, - /* 430 */ 1810, 1732, 1815, 1811, 1812, 1699, 1689, 1710, 1817, 1700, - /* 440 */ 1819, 1701, 1826, 1843, 1705, 1697, 1719, 1787, 1814, 1702, - /* 450 */ 1796, 1799, 1800, 1801, 1727, 1742, 1823, 1721, 1860, 1857, - /* 460 */ 1841, 1751, 1707, 1802, 1840, 1803, 1792, 1827, 1730, 1759, - /* 470 */ 1849, 1854, 1856, 1747, 1754, 1858, 1818, 1859, 1861, 1855, - /* 480 */ 1862, 1820, 1829, 1865, 1783, 1863, 1864, 1825, 1845, 1867, - /* 490 */ 1746, 1872, 1873, 1874, 1875, 1869, 1876, 1878, 1877, 1879, - /* 500 */ 1881, 1880, 1767, 1882, 1884, 1794, 1883, 1887, 1769, 1885, - /* 510 */ 1886, 1888, 1889, 1890, 1824, 1838, 1828, 1871, 1844, 1832, - /* 520 */ 1892, 1893, 1896, 1897, 1901, 1902, 1895, 1907, 1885, 1908, - /* 530 */ 1909, 1910, 1911, 1912, 1913, 1915, 1924, 1917, 1918, 1919, - /* 540 */ 1920, 1922, 1923, 1921, 1808, 1807, 1809, 1821, 1822, 1926, - /* 550 */ 1935, 1950, 1951, + /* 0 */ 1423, 1409, 1454, 1192, 1192, 36, 1252, 1410, 1517, 1684, + /* 10 */ 1684, 1684, 292, 0, 0, 180, 1015, 1684, 1684, 1684, + /* 20 */ 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, + /* 30 */ 1049, 1049, 1121, 1121, 54, 400, 36, 36, 36, 36, + /* 40 */ 36, 40, 110, 219, 289, 396, 439, 509, 548, 618, + /* 50 */ 657, 727, 766, 836, 995, 1015, 1015, 1015, 1015, 1015, + /* 60 */ 1015, 1015, 1015, 1015, 1015, 1015, 1015, 1015, 1015, 1015, + /* 70 */ 1015, 1015, 1015, 1035, 1015, 1138, 880, 880, 1577, 1684, + /* 80 */ 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, + /* 90 */ 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, + /* 100 */ 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, + /* 110 */ 1684, 1684, 1684, 1705, 1684, 1684, 1684, 1684, 1684, 1684, + /* 120 */ 1684, 1684, 1684, 1684, 1684, 1684, 1684, 146, 84, 84, + /* 130 */ 84, 84, 84, 362, 269, 125, 97, 453, 66, 66, + /* 140 */ 893, 1090, 66, 66, 533, 533, 66, 554, 554, 554, + /* 150 */ 554, 192, 587, 587, 695, 25, 2020, 2020, 290, 290, + /* 160 */ 290, 200, 514, 514, 514, 514, 939, 939, 442, 875, + /* 170 */ 935, 66, 66, 66, 66, 66, 66, 66, 66, 66, + /* 180 */ 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, + /* 190 */ 66, 601, 601, 66, 729, 878, 878, 1266, 1266, 552, + /* 200 */ 1023, 2020, 2020, 2020, 2020, 2020, 2020, 2020, 307, 490, + /* 210 */ 490, 567, 393, 517, 467, 672, 242, 682, 675, 66, + /* 220 */ 66, 66, 66, 66, 66, 66, 66, 66, 66, 616, + /* 230 */ 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, + /* 240 */ 66, 66, 1093, 1093, 1093, 66, 66, 66, 778, 66, + /* 250 */ 66, 66, 1053, 1064, 66, 66, 1190, 66, 66, 66, + /* 260 */ 66, 66, 66, 66, 66, 722, 992, 718, 
253, 253, + /* 270 */ 253, 253, 338, 718, 718, 888, 403, 852, 1328, 254, + /* 280 */ 1295, 721, 1330, 1295, 1330, 1370, 234, 254, 254, 234, + /* 290 */ 254, 721, 1370, 1357, 1492, 1348, 385, 385, 385, 1330, + /* 300 */ 1425, 1425, 643, 1315, 1336, 1004, 1651, 1651, 1581, 1581, + /* 310 */ 1674, 1674, 1581, 1576, 1579, 1716, 1696, 1724, 1724, 1724, + /* 320 */ 1724, 1581, 1732, 1606, 1579, 1579, 1606, 1716, 1696, 1606, + /* 330 */ 1696, 1606, 1581, 1732, 1608, 1706, 1581, 1732, 1750, 1581, + /* 340 */ 1732, 1581, 1732, 1750, 1660, 1660, 1660, 1711, 1766, 1766, + /* 350 */ 1750, 1660, 1664, 1660, 1711, 1660, 1660, 1628, 1769, 1683, + /* 360 */ 1683, 1750, 1653, 1709, 1653, 1709, 1653, 1709, 1653, 1709, + /* 370 */ 1581, 1735, 1735, 1746, 1746, 1682, 1686, 1809, 1581, 1685, + /* 380 */ 1682, 1697, 1699, 1606, 1821, 1825, 1840, 1840, 1864, 1864, + /* 390 */ 1864, 2020, 2020, 2020, 2020, 2020, 2020, 2020, 2020, 2020, + /* 400 */ 2020, 2020, 2020, 2020, 2020, 2020, 599, 223, 1193, 1299, + /* 410 */ 228, 780, 958, 1505, 1153, 1435, 1368, 1426, 1430, 1552, + /* 420 */ 1477, 1556, 1557, 1564, 1572, 1578, 1580, 1489, 1474, 1602, + /* 430 */ 1389, 1514, 1500, 1595, 1596, 1484, 1603, 1075, 1460, 1605, + /* 440 */ 1612, 1526, 1507, 1882, 1884, 1866, 1727, 1875, 1876, 1868, + /* 450 */ 1870, 1757, 1747, 1767, 1872, 1872, 1877, 1758, 1880, 1759, + /* 460 */ 1887, 1903, 1764, 1777, 1872, 1778, 1847, 1873, 1872, 1761, + /* 470 */ 1856, 1862, 1863, 1865, 1788, 1804, 1886, 1781, 1921, 1918, + /* 480 */ 1902, 1811, 1768, 1858, 1901, 1861, 1855, 1890, 1792, 1819, + /* 490 */ 1910, 1915, 1917, 1807, 1814, 1919, 1878, 1920, 1922, 1916, + /* 500 */ 1924, 1881, 1888, 1925, 1844, 1923, 1928, 1885, 1906, 1930, + /* 510 */ 1806, 1933, 1934, 1935, 1936, 1937, 1938, 1940, 1859, 1820, + /* 520 */ 1941, 1942, 1851, 1939, 1945, 1826, 1943, 1944, 1946, 1947, + /* 530 */ 1948, 1883, 1895, 1889, 1932, 1897, 1891, 1949, 1951, 1955, + /* 540 */ 1961, 1953, 1960, 1954, 1964, 1943, 1966, 1967, 1968, 1969, + /* 550 */ 1970, 1971, 1973, 1982, 1975, 1976, 1977, 1978, 1980, 1981, + /* 560 */ 1979, 1874, 1867, 1869, 1871, 1879, 1983, 1984, 2003, 2009, }; -#define YY_REDUCE_COUNT (391) -#define YY_REDUCE_MIN (-262) -#define YY_REDUCE_MAX (1625) +#define YY_REDUCE_COUNT (405) +#define YY_REDUCE_MIN (-265) +#define YY_REDUCE_MAX (1690) static const short yy_reduce_ofst[] = { - /* 0 */ 490, -122, 545, 645, 650, -120, -189, -187, -184, -182, - /* 10 */ -178, -176, 45, 30, 200, -251, -134, 390, 392, 521, - /* 20 */ 523, 213, 692, 821, 284, 589, 872, 666, 671, 866, - /* 30 */ 71, 111, 273, 389, 686, 815, 904, 932, 948, 955, - /* 40 */ 964, 969, -259, -259, -259, -259, -259, -259, -259, -259, - /* 50 */ -259, -259, -259, -259, -259, -259, -259, -259, -259, -259, - /* 60 */ -259, -259, -259, -259, -259, -259, -259, -259, -259, -259, - /* 70 */ -259, -259, -259, -259, -259, -259, -259, -259, 428, 430, - /* 80 */ 899, 985, 1021, 1028, 1057, 1069, 1081, 1108, 1110, 1115, - /* 90 */ 1117, 1123, 1149, 1154, 1159, 1170, 1174, 1178, 1183, 1194, - /* 100 */ 1198, 1204, 1208, 1212, 1218, 1222, 1229, 1278, 1280, 1283, - /* 110 */ 1285, 1313, 1316, 1320, 1322, 1325, 1327, 1330, 1366, 1371, - /* 120 */ 1379, 1387, 1417, 1425, 1430, 1432, -259, -259, -259, -259, - /* 130 */ -259, -259, -259, -259, -259, 557, 974, -214, -174, -9, - /* 140 */ 431, -124, 806, 925, 806, 925, 251, 928, 940, -259, - /* 150 */ -259, -259, -259, -198, -198, -198, 127, -186, -168, 212, - /* 160 */ 646, 617, 799, -262, 555, 220, 220, 491, 605, 1040, - /* 170 */ 1060, 699, -11, 600, 
848, 862, 345, -129, 724, -91, - /* 180 */ 158, 749, 716, 900, 304, 822, 929, 926, 499, 793, - /* 190 */ 322, 892, 813, 845, 958, 1056, 751, 905, 1133, 1062, - /* 200 */ 803, -210, -185, -179, -148, -167, -89, 121, 274, 281, - /* 210 */ 320, 336, 439, 663, 711, 957, 965, 1064, 1068, 1112, - /* 220 */ 1116, -196, 1127, 1134, 1180, 1184, 1195, 1199, 1203, 1215, - /* 230 */ 1223, 1250, 1267, 1286, 205, 422, 638, 1324, 1341, 1364, - /* 240 */ 1365, 1213, 1392, 1399, 1403, 869, 1260, 1405, 1421, 1276, - /* 250 */ 1424, 121, 1426, 1427, 1428, 1433, 1436, 1437, 1227, 1338, - /* 260 */ 1284, 1359, 1370, 1377, 1388, 1213, 1284, 1284, 1385, 1438, - /* 270 */ 1443, 1349, 1400, 1391, 1394, 1360, 1408, 1410, 1367, 1439, - /* 280 */ 1440, 1435, 1442, 1446, 1447, 1397, 1413, 1418, 1390, 1444, - /* 290 */ 1445, 1474, 1381, 1479, 1480, 1401, 1402, 1490, 1414, 1449, - /* 300 */ 1452, 1453, 1467, 1456, 1469, 1470, 1477, 1478, 1515, 1518, - /* 310 */ 1476, 1482, 1450, 1454, 1492, 1483, 1493, 1484, 1523, 1531, - /* 320 */ 1457, 1455, 1532, 1534, 1516, 1537, 1540, 1543, 1541, 1526, - /* 330 */ 1528, 1530, 1542, 1512, 1529, 1533, 1544, 1545, 1547, 1550, - /* 340 */ 1549, 1551, 1554, 1458, 1552, 1494, 1495, 1556, 1498, 1502, - /* 350 */ 1503, 1511, 1517, 1519, 1522, 1524, 1579, 1472, 1473, 1527, - /* 360 */ 1555, 1557, 1559, 1558, 1589, 1560, 1561, 1564, 1566, 1568, - /* 370 */ 1592, 1595, 1605, 1606, 1612, 1613, 1622, 1562, 1563, 1505, - /* 380 */ 1609, 1604, 1608, 1614, 1615, 1616, 1596, 1597, 1617, 1620, - /* 390 */ 1625, 1619, + /* 0 */ 111, 168, 386, 761, -176, -174, -191, -189, -181, -178, + /* 10 */ 176, 263, 44, -207, -204, -265, -139, -114, 158, 504, + /* 20 */ 525, 544, 612, 614, 650, 652, 765, 265, 703, 705, + /* 30 */ 70, 714, -187, 127, 774, 713, 767, 769, 970, 1019, + /* 40 */ 1021, -255, -255, -255, -255, -255, -255, -255, -255, -255, + /* 50 */ -255, -255, -255, -255, -255, -255, -255, -255, -255, -255, + /* 60 */ -255, -255, -255, -255, -255, -255, -255, -255, -255, -255, + /* 70 */ -255, -255, -255, -255, -255, -255, -255, -255, 394, 542, + /* 80 */ 816, 818, 842, 882, 902, 919, 938, 940, 957, 986, + /* 90 */ 1048, 1063, 1068, 1073, 1076, 1088, 1100, 1102, 1104, 1106, + /* 100 */ 1113, 1119, 1137, 1140, 1143, 1147, 1149, 1164, 1173, 1183, + /* 110 */ 1185, 1188, 1202, 1204, 1247, 1259, 1263, 1283, 1289, 1292, + /* 120 */ 1298, 1300, 1329, 1331, 1343, 1358, 1360, -255, -255, -255, + /* 130 */ -255, -255, -255, -255, -255, 196, -255, 387, -177, 507, + /* 140 */ 1002, -219, 557, -93, -167, 638, -121, 284, 500, 284, + /* 150 */ 500, 247, 651, 865, -255, -255, -255, -255, -85, -85, + /* 160 */ -85, 237, 171, 602, 846, 885, -212, -203, 217, 380, + /* 170 */ 380, -23, 161, 653, 712, 773, 943, 990, 1040, 563, + /* 180 */ 833, 971, 1005, 1042, 1092, 1078, 1043, 1144, 1184, -186, + /* 190 */ 1105, 318, 869, 7, 825, 920, 1074, 704, 706, 390, + /* 200 */ 1087, 1094, 336, 545, 772, 1201, 1117, 1207, -179, -137, + /* 210 */ -112, -13, 18, 112, 197, 418, 495, 508, 777, 809, + /* 220 */ 923, 1014, 1027, 1033, 1044, 1115, 1194, 1212, 1221, 209, + /* 230 */ 1236, 1240, 1256, 1287, 1301, 1307, 1349, 1359, 1398, 1417, + /* 240 */ 1429, 1434, 681, 1377, 1404, 1448, 1449, 1450, 1388, 1453, + /* 250 */ 1455, 1458, 1393, 1335, 1461, 1462, 1418, 1463, 197, 1464, + /* 260 */ 1465, 1466, 1467, 1468, 1469, 1376, 1378, 1424, 1412, 1413, + /* 270 */ 1414, 1415, 1388, 1424, 1424, 1428, 1470, 1485, 1381, 1408, + /* 280 */ 1416, 1436, 1431, 1422, 1432, 1392, 1446, 1411, 1427, 1456, + /* 290 */ 1433, 1471, 1401, 1479, 1472, 
1478, 1486, 1491, 1493, 1452, + /* 300 */ 1459, 1473, 1437, 1475, 1476, 1516, 1421, 1440, 1520, 1524, + /* 310 */ 1444, 1445, 1525, 1457, 1480, 1481, 1509, 1510, 1511, 1512, + /* 320 */ 1513, 1553, 1555, 1515, 1487, 1488, 1518, 1495, 1522, 1523, + /* 330 */ 1528, 1527, 1562, 1566, 1482, 1494, 1569, 1574, 1559, 1575, + /* 340 */ 1582, 1583, 1585, 1560, 1568, 1570, 1571, 1563, 1573, 1586, + /* 350 */ 1584, 1588, 1589, 1593, 1590, 1594, 1598, 1501, 1496, 1536, + /* 360 */ 1537, 1599, 1561, 1567, 1587, 1591, 1592, 1597, 1604, 1607, + /* 370 */ 1629, 1519, 1521, 1601, 1609, 1600, 1610, 1558, 1636, 1565, + /* 380 */ 1618, 1621, 1611, 1624, 1648, 1652, 1676, 1677, 1687, 1688, + /* 390 */ 1689, 1613, 1614, 1615, 1668, 1663, 1665, 1666, 1670, 1678, + /* 400 */ 1655, 1662, 1672, 1673, 1675, 1690, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 1575, 1575, 1575, 1411, 1188, 1297, 1188, 1188, 1188, 1411, - /* 10 */ 1411, 1411, 1188, 1327, 1327, 1464, 1219, 1188, 1188, 1188, - /* 20 */ 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1410, 1188, 1188, - /* 30 */ 1188, 1188, 1494, 1494, 1188, 1188, 1188, 1188, 1188, 1188, - /* 40 */ 1188, 1188, 1188, 1336, 1188, 1188, 1188, 1188, 1188, 1188, - /* 50 */ 1412, 1413, 1188, 1188, 1188, 1463, 1465, 1428, 1346, 1345, - /* 60 */ 1344, 1343, 1446, 1314, 1341, 1334, 1338, 1406, 1407, 1405, - /* 70 */ 1409, 1413, 1412, 1188, 1337, 1377, 1391, 1376, 1188, 1188, - /* 80 */ 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, - /* 90 */ 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, - /* 100 */ 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, - /* 110 */ 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, - /* 120 */ 1188, 1188, 1188, 1188, 1188, 1188, 1385, 1390, 1396, 1389, - /* 130 */ 1386, 1379, 1378, 1380, 1381, 1188, 1209, 1261, 1188, 1188, - /* 140 */ 1188, 1188, 1482, 1481, 1188, 1188, 1219, 1371, 1370, 1382, - /* 150 */ 1383, 1393, 1392, 1471, 1529, 1528, 1429, 1188, 1188, 1188, - /* 160 */ 1188, 1188, 1188, 1494, 1188, 1188, 1188, 1188, 1188, 1188, - /* 170 */ 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, - /* 180 */ 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1494, 1494, - /* 190 */ 1188, 1219, 1494, 1494, 1215, 1215, 1321, 1188, 1477, 1297, - /* 200 */ 1288, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, - /* 210 */ 1188, 1188, 1188, 1188, 1188, 1468, 1466, 1188, 1188, 1188, - /* 220 */ 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, - /* 230 */ 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, - /* 240 */ 1188, 1188, 1188, 1188, 1188, 1293, 1188, 1188, 1188, 1188, - /* 250 */ 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1523, 1188, 1441, - /* 260 */ 1275, 1293, 1293, 1293, 1293, 1295, 1276, 1274, 1287, 1220, - /* 270 */ 1195, 1567, 1294, 1316, 1316, 1564, 1340, 1294, 1564, 1236, - /* 280 */ 1545, 1231, 1327, 1327, 1327, 1316, 1321, 1321, 1408, 1294, - /* 290 */ 1287, 1188, 1567, 1302, 1302, 1566, 1566, 1302, 1429, 1349, - /* 300 */ 1355, 1340, 1264, 1340, 1270, 1270, 1270, 1270, 1302, 1206, - /* 310 */ 1340, 1340, 1349, 1355, 1264, 1340, 1264, 1340, 1302, 1206, - /* 320 */ 1445, 1561, 1302, 1206, 1419, 1302, 1206, 1302, 1206, 1419, - /* 330 */ 1262, 1262, 1262, 1251, 1188, 1188, 1419, 1262, 1236, 1262, - /* 340 */ 1251, 1262, 1262, 1512, 1419, 1423, 1423, 1419, 1320, 1315, - /* 350 */ 1320, 1315, 1320, 1315, 1320, 1315, 1302, 1504, 1504, 1330, - /* 360 */ 1330, 1335, 1321, 1414, 1302, 1188, 1335, 1333, 1331, 1340, - /* 370 */ 1212, 1254, 1526, 1526, 1522, 1522, 1522, 
1572, 1572, 1477, - /* 380 */ 1538, 1219, 1219, 1219, 1219, 1538, 1238, 1238, 1220, 1220, - /* 390 */ 1219, 1538, 1188, 1188, 1188, 1188, 1188, 1188, 1533, 1188, - /* 400 */ 1430, 1306, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, - /* 410 */ 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, - /* 420 */ 1188, 1188, 1188, 1188, 1188, 1360, 1188, 1191, 1474, 1188, - /* 430 */ 1188, 1472, 1188, 1188, 1188, 1188, 1188, 1188, 1307, 1188, - /* 440 */ 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, - /* 450 */ 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1563, 1188, 1188, - /* 460 */ 1188, 1188, 1188, 1188, 1444, 1443, 1188, 1188, 1304, 1188, - /* 470 */ 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, - /* 480 */ 1188, 1188, 1234, 1188, 1188, 1188, 1188, 1188, 1188, 1188, - /* 490 */ 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, - /* 500 */ 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1332, - /* 510 */ 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, - /* 520 */ 1188, 1188, 1188, 1188, 1509, 1322, 1188, 1188, 1554, 1188, - /* 530 */ 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, - /* 540 */ 1188, 1188, 1188, 1549, 1278, 1362, 1188, 1361, 1365, 1188, - /* 550 */ 1200, 1188, 1188, + /* 0 */ 1623, 1623, 1623, 1453, 1223, 1332, 1223, 1223, 1223, 1453, + /* 10 */ 1453, 1453, 1223, 1362, 1362, 1506, 1254, 1223, 1223, 1223, + /* 20 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1452, 1223, 1223, + /* 30 */ 1223, 1223, 1541, 1541, 1223, 1223, 1223, 1223, 1223, 1223, + /* 40 */ 1223, 1223, 1371, 1223, 1378, 1223, 1223, 1223, 1223, 1223, + /* 50 */ 1454, 1455, 1223, 1223, 1223, 1505, 1507, 1470, 1385, 1384, + /* 60 */ 1383, 1382, 1488, 1349, 1376, 1369, 1373, 1448, 1449, 1447, + /* 70 */ 1451, 1455, 1454, 1223, 1372, 1419, 1433, 1418, 1223, 1223, + /* 80 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, + /* 90 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, + /* 100 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, + /* 110 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, + /* 120 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1427, 1432, 1438, + /* 130 */ 1431, 1428, 1421, 1420, 1422, 1223, 1423, 1223, 1223, 1223, + /* 140 */ 1244, 1296, 1223, 1223, 1223, 1223, 1223, 1525, 1524, 1223, + /* 150 */ 1223, 1254, 1413, 1412, 1424, 1425, 1435, 1434, 1513, 1576, + /* 160 */ 1575, 1471, 1223, 1223, 1223, 1223, 1223, 1223, 1541, 1223, + /* 170 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, + /* 180 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, + /* 190 */ 1223, 1541, 1541, 1223, 1254, 1541, 1541, 1250, 1250, 1356, + /* 200 */ 1223, 1520, 1323, 1323, 1323, 1323, 1332, 1323, 1223, 1223, + /* 210 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, + /* 220 */ 1223, 1223, 1223, 1510, 1508, 1223, 1223, 1223, 1223, 1223, + /* 230 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, + /* 240 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, + /* 250 */ 1223, 1223, 1328, 1223, 1223, 1223, 1223, 1223, 1223, 1223, + /* 260 */ 1223, 1223, 1223, 1223, 1570, 1223, 1483, 1310, 1328, 1328, + /* 270 */ 1328, 1328, 1330, 1311, 1309, 1322, 1255, 1230, 1615, 1388, + /* 280 */ 1377, 1329, 1351, 1377, 1351, 1612, 1375, 1388, 1388, 1375, + /* 290 */ 1388, 1329, 1612, 1271, 1592, 1266, 1362, 1362, 1362, 1351, + /* 300 */ 1356, 1356, 1450, 1329, 1322, 1223, 1615, 1615, 1337, 1337, + /* 310 */ 1614, 1614, 1337, 1471, 1599, 1397, 1299, 1305, 
1305, 1305, + /* 320 */ 1305, 1337, 1241, 1375, 1599, 1599, 1375, 1397, 1299, 1375, + /* 330 */ 1299, 1375, 1337, 1241, 1487, 1609, 1337, 1241, 1461, 1337, + /* 340 */ 1241, 1337, 1241, 1461, 1297, 1297, 1297, 1286, 1223, 1223, + /* 350 */ 1461, 1297, 1271, 1297, 1286, 1297, 1297, 1559, 1223, 1465, + /* 360 */ 1465, 1461, 1355, 1350, 1355, 1350, 1355, 1350, 1355, 1350, + /* 370 */ 1337, 1551, 1551, 1365, 1365, 1370, 1356, 1456, 1337, 1223, + /* 380 */ 1370, 1368, 1366, 1375, 1247, 1289, 1573, 1573, 1569, 1569, + /* 390 */ 1569, 1620, 1620, 1520, 1585, 1254, 1254, 1254, 1254, 1585, + /* 400 */ 1273, 1273, 1255, 1255, 1254, 1585, 1223, 1223, 1223, 1223, + /* 410 */ 1223, 1223, 1580, 1223, 1515, 1472, 1341, 1223, 1223, 1223, + /* 420 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, + /* 430 */ 1223, 1526, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, + /* 440 */ 1223, 1223, 1402, 1223, 1226, 1517, 1223, 1223, 1223, 1223, + /* 450 */ 1223, 1223, 1223, 1223, 1379, 1380, 1342, 1223, 1223, 1223, + /* 460 */ 1223, 1223, 1223, 1223, 1394, 1223, 1223, 1223, 1389, 1223, + /* 470 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1611, 1223, 1223, + /* 480 */ 1223, 1223, 1223, 1223, 1486, 1485, 1223, 1223, 1339, 1223, + /* 490 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, + /* 500 */ 1223, 1223, 1269, 1223, 1223, 1223, 1223, 1223, 1223, 1223, + /* 510 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, + /* 520 */ 1223, 1223, 1223, 1223, 1223, 1223, 1367, 1223, 1223, 1223, + /* 530 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, + /* 540 */ 1223, 1556, 1357, 1223, 1223, 1602, 1223, 1223, 1223, 1223, + /* 550 */ 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, 1223, + /* 560 */ 1596, 1313, 1404, 1223, 1403, 1407, 1223, 1235, 1223, 1223, }; /********** End of lemon-generated parsing tables *****************************/ @@ -156096,6 +158275,7 @@ static const YYCODETYPE yyFallback[] = { 59, /* TIES => ID */ 59, /* GENERATED => ID */ 59, /* ALWAYS => ID */ + 59, /* MATERIALIZED => ID */ 59, /* REINDEX => ID */ 59, /* RENAME => ID */ 59, /* CTIME_KW => ID */ @@ -156147,6 +158327,7 @@ static const YYCODETYPE yyFallback[] = { 0, /* HAVING => nothing */ 0, /* LIMIT => nothing */ 0, /* WHERE => nothing */ + 0, /* RETURNING => nothing */ 0, /* INTO => nothing */ 0, /* NOTHING => nothing */ 0, /* FLOAT => nothing */ @@ -156365,219 +158546,225 @@ static const char *const yyTokenName[] = { /* 94 */ "TIES", /* 95 */ "GENERATED", /* 96 */ "ALWAYS", - /* 97 */ "REINDEX", - /* 98 */ "RENAME", - /* 99 */ "CTIME_KW", - /* 100 */ "ANY", - /* 101 */ "BITAND", - /* 102 */ "BITOR", - /* 103 */ "LSHIFT", - /* 104 */ "RSHIFT", - /* 105 */ "PLUS", - /* 106 */ "MINUS", - /* 107 */ "STAR", - /* 108 */ "SLASH", - /* 109 */ "REM", - /* 110 */ "CONCAT", - /* 111 */ "COLLATE", - /* 112 */ "BITNOT", - /* 113 */ "ON", - /* 114 */ "INDEXED", - /* 115 */ "STRING", - /* 116 */ "JOIN_KW", - /* 117 */ "CONSTRAINT", - /* 118 */ "DEFAULT", - /* 119 */ "NULL", - /* 120 */ "PRIMARY", - /* 121 */ "UNIQUE", - /* 122 */ "CHECK", - /* 123 */ "REFERENCES", - /* 124 */ "AUTOINCR", - /* 125 */ "INSERT", - /* 126 */ "DELETE", - /* 127 */ "UPDATE", - /* 128 */ "SET", - /* 129 */ "DEFERRABLE", - /* 130 */ "FOREIGN", - /* 131 */ "DROP", - /* 132 */ "UNION", - /* 133 */ "ALL", - /* 134 */ "EXCEPT", - /* 135 */ "INTERSECT", - /* 136 */ "SELECT", - /* 137 */ "VALUES", - /* 138 */ "DISTINCT", - /* 139 */ "DOT", - /* 140 */ "FROM", - /* 141 */ "JOIN", - /* 142 */ "USING", - /* 143 */ "ORDER", - /* 144 
*/ "GROUP", - /* 145 */ "HAVING", - /* 146 */ "LIMIT", - /* 147 */ "WHERE", - /* 148 */ "INTO", - /* 149 */ "NOTHING", - /* 150 */ "FLOAT", - /* 151 */ "BLOB", - /* 152 */ "INTEGER", - /* 153 */ "VARIABLE", - /* 154 */ "CASE", - /* 155 */ "WHEN", - /* 156 */ "THEN", - /* 157 */ "ELSE", - /* 158 */ "INDEX", - /* 159 */ "ALTER", - /* 160 */ "ADD", - /* 161 */ "WINDOW", - /* 162 */ "OVER", - /* 163 */ "FILTER", - /* 164 */ "COLUMN", - /* 165 */ "AGG_FUNCTION", - /* 166 */ "AGG_COLUMN", - /* 167 */ "TRUEFALSE", - /* 168 */ "ISNOT", - /* 169 */ "FUNCTION", - /* 170 */ "UMINUS", - /* 171 */ "UPLUS", - /* 172 */ "TRUTH", - /* 173 */ "REGISTER", - /* 174 */ "VECTOR", - /* 175 */ "SELECT_COLUMN", - /* 176 */ "IF_NULL_ROW", - /* 177 */ "ASTERISK", - /* 178 */ "SPAN", - /* 179 */ "SPACE", - /* 180 */ "ILLEGAL", - /* 181 */ "input", - /* 182 */ "cmdlist", - /* 183 */ "ecmd", - /* 184 */ "cmdx", - /* 185 */ "explain", - /* 186 */ "cmd", - /* 187 */ "transtype", - /* 188 */ "trans_opt", - /* 189 */ "nm", - /* 190 */ "savepoint_opt", - /* 191 */ "create_table", - /* 192 */ "create_table_args", - /* 193 */ "createkw", - /* 194 */ "temp", - /* 195 */ "ifnotexists", - /* 196 */ "dbnm", - /* 197 */ "columnlist", - /* 198 */ "conslist_opt", - /* 199 */ "table_options", - /* 200 */ "select", - /* 201 */ "columnname", - /* 202 */ "carglist", - /* 203 */ "typetoken", - /* 204 */ "typename", - /* 205 */ "signed", - /* 206 */ "plus_num", - /* 207 */ "minus_num", - /* 208 */ "scanpt", - /* 209 */ "scantok", - /* 210 */ "ccons", - /* 211 */ "term", - /* 212 */ "expr", - /* 213 */ "onconf", - /* 214 */ "sortorder", - /* 215 */ "autoinc", - /* 216 */ "eidlist_opt", - /* 217 */ "refargs", - /* 218 */ "defer_subclause", - /* 219 */ "generated", - /* 220 */ "refarg", - /* 221 */ "refact", - /* 222 */ "init_deferred_pred_opt", - /* 223 */ "conslist", - /* 224 */ "tconscomma", - /* 225 */ "tcons", - /* 226 */ "sortlist", - /* 227 */ "eidlist", - /* 228 */ "defer_subclause_opt", - /* 229 */ "orconf", - /* 230 */ "resolvetype", - /* 231 */ "raisetype", - /* 232 */ "ifexists", - /* 233 */ "fullname", - /* 234 */ "selectnowith", - /* 235 */ "oneselect", - /* 236 */ "wqlist", - /* 237 */ "multiselect_op", - /* 238 */ "distinct", - /* 239 */ "selcollist", - /* 240 */ "from", - /* 241 */ "where_opt", - /* 242 */ "groupby_opt", - /* 243 */ "having_opt", - /* 244 */ "orderby_opt", - /* 245 */ "limit_opt", - /* 246 */ "window_clause", - /* 247 */ "values", - /* 248 */ "nexprlist", - /* 249 */ "sclp", - /* 250 */ "as", - /* 251 */ "seltablist", - /* 252 */ "stl_prefix", - /* 253 */ "joinop", - /* 254 */ "indexed_opt", - /* 255 */ "on_opt", - /* 256 */ "using_opt", - /* 257 */ "exprlist", - /* 258 */ "xfullname", - /* 259 */ "idlist", - /* 260 */ "nulls", - /* 261 */ "with", - /* 262 */ "setlist", - /* 263 */ "insert_cmd", - /* 264 */ "idlist_opt", - /* 265 */ "upsert", - /* 266 */ "filter_over", - /* 267 */ "likeop", - /* 268 */ "between_op", - /* 269 */ "in_op", - /* 270 */ "paren_exprlist", - /* 271 */ "case_operand", - /* 272 */ "case_exprlist", - /* 273 */ "case_else", - /* 274 */ "uniqueflag", - /* 275 */ "collate", - /* 276 */ "vinto", - /* 277 */ "nmnum", - /* 278 */ "trigger_decl", - /* 279 */ "trigger_cmd_list", - /* 280 */ "trigger_time", - /* 281 */ "trigger_event", - /* 282 */ "foreach_clause", - /* 283 */ "when_clause", - /* 284 */ "trigger_cmd", - /* 285 */ "trnm", - /* 286 */ "tridxby", - /* 287 */ "database_kw_opt", - /* 288 */ "key_opt", - /* 289 */ "add_column_fullname", - /* 290 */ "kwcolumn_opt", - /* 291 */ 
"create_vtab", - /* 292 */ "vtabarglist", - /* 293 */ "vtabarg", - /* 294 */ "vtabargtoken", - /* 295 */ "lp", - /* 296 */ "anylist", - /* 297 */ "windowdefn_list", - /* 298 */ "windowdefn", - /* 299 */ "window", - /* 300 */ "frame_opt", - /* 301 */ "part_opt", - /* 302 */ "filter_clause", - /* 303 */ "over_clause", - /* 304 */ "range_or_rows", - /* 305 */ "frame_bound", - /* 306 */ "frame_bound_s", - /* 307 */ "frame_bound_e", - /* 308 */ "frame_exclude_opt", - /* 309 */ "frame_exclude", + /* 97 */ "MATERIALIZED", + /* 98 */ "REINDEX", + /* 99 */ "RENAME", + /* 100 */ "CTIME_KW", + /* 101 */ "ANY", + /* 102 */ "BITAND", + /* 103 */ "BITOR", + /* 104 */ "LSHIFT", + /* 105 */ "RSHIFT", + /* 106 */ "PLUS", + /* 107 */ "MINUS", + /* 108 */ "STAR", + /* 109 */ "SLASH", + /* 110 */ "REM", + /* 111 */ "CONCAT", + /* 112 */ "COLLATE", + /* 113 */ "BITNOT", + /* 114 */ "ON", + /* 115 */ "INDEXED", + /* 116 */ "STRING", + /* 117 */ "JOIN_KW", + /* 118 */ "CONSTRAINT", + /* 119 */ "DEFAULT", + /* 120 */ "NULL", + /* 121 */ "PRIMARY", + /* 122 */ "UNIQUE", + /* 123 */ "CHECK", + /* 124 */ "REFERENCES", + /* 125 */ "AUTOINCR", + /* 126 */ "INSERT", + /* 127 */ "DELETE", + /* 128 */ "UPDATE", + /* 129 */ "SET", + /* 130 */ "DEFERRABLE", + /* 131 */ "FOREIGN", + /* 132 */ "DROP", + /* 133 */ "UNION", + /* 134 */ "ALL", + /* 135 */ "EXCEPT", + /* 136 */ "INTERSECT", + /* 137 */ "SELECT", + /* 138 */ "VALUES", + /* 139 */ "DISTINCT", + /* 140 */ "DOT", + /* 141 */ "FROM", + /* 142 */ "JOIN", + /* 143 */ "USING", + /* 144 */ "ORDER", + /* 145 */ "GROUP", + /* 146 */ "HAVING", + /* 147 */ "LIMIT", + /* 148 */ "WHERE", + /* 149 */ "RETURNING", + /* 150 */ "INTO", + /* 151 */ "NOTHING", + /* 152 */ "FLOAT", + /* 153 */ "BLOB", + /* 154 */ "INTEGER", + /* 155 */ "VARIABLE", + /* 156 */ "CASE", + /* 157 */ "WHEN", + /* 158 */ "THEN", + /* 159 */ "ELSE", + /* 160 */ "INDEX", + /* 161 */ "ALTER", + /* 162 */ "ADD", + /* 163 */ "WINDOW", + /* 164 */ "OVER", + /* 165 */ "FILTER", + /* 166 */ "COLUMN", + /* 167 */ "AGG_FUNCTION", + /* 168 */ "AGG_COLUMN", + /* 169 */ "TRUEFALSE", + /* 170 */ "ISNOT", + /* 171 */ "FUNCTION", + /* 172 */ "UMINUS", + /* 173 */ "UPLUS", + /* 174 */ "TRUTH", + /* 175 */ "REGISTER", + /* 176 */ "VECTOR", + /* 177 */ "SELECT_COLUMN", + /* 178 */ "IF_NULL_ROW", + /* 179 */ "ASTERISK", + /* 180 */ "SPAN", + /* 181 */ "SPACE", + /* 182 */ "ILLEGAL", + /* 183 */ "input", + /* 184 */ "cmdlist", + /* 185 */ "ecmd", + /* 186 */ "cmdx", + /* 187 */ "explain", + /* 188 */ "cmd", + /* 189 */ "transtype", + /* 190 */ "trans_opt", + /* 191 */ "nm", + /* 192 */ "savepoint_opt", + /* 193 */ "create_table", + /* 194 */ "create_table_args", + /* 195 */ "createkw", + /* 196 */ "temp", + /* 197 */ "ifnotexists", + /* 198 */ "dbnm", + /* 199 */ "columnlist", + /* 200 */ "conslist_opt", + /* 201 */ "table_options", + /* 202 */ "select", + /* 203 */ "columnname", + /* 204 */ "carglist", + /* 205 */ "typetoken", + /* 206 */ "typename", + /* 207 */ "signed", + /* 208 */ "plus_num", + /* 209 */ "minus_num", + /* 210 */ "scanpt", + /* 211 */ "scantok", + /* 212 */ "ccons", + /* 213 */ "term", + /* 214 */ "expr", + /* 215 */ "onconf", + /* 216 */ "sortorder", + /* 217 */ "autoinc", + /* 218 */ "eidlist_opt", + /* 219 */ "refargs", + /* 220 */ "defer_subclause", + /* 221 */ "generated", + /* 222 */ "refarg", + /* 223 */ "refact", + /* 224 */ "init_deferred_pred_opt", + /* 225 */ "conslist", + /* 226 */ "tconscomma", + /* 227 */ "tcons", + /* 228 */ "sortlist", + /* 229 */ "eidlist", + /* 230 */ 
"defer_subclause_opt", + /* 231 */ "orconf", + /* 232 */ "resolvetype", + /* 233 */ "raisetype", + /* 234 */ "ifexists", + /* 235 */ "fullname", + /* 236 */ "selectnowith", + /* 237 */ "oneselect", + /* 238 */ "wqlist", + /* 239 */ "multiselect_op", + /* 240 */ "distinct", + /* 241 */ "selcollist", + /* 242 */ "from", + /* 243 */ "where_opt", + /* 244 */ "groupby_opt", + /* 245 */ "having_opt", + /* 246 */ "orderby_opt", + /* 247 */ "limit_opt", + /* 248 */ "window_clause", + /* 249 */ "values", + /* 250 */ "nexprlist", + /* 251 */ "sclp", + /* 252 */ "as", + /* 253 */ "seltablist", + /* 254 */ "stl_prefix", + /* 255 */ "joinop", + /* 256 */ "indexed_opt", + /* 257 */ "on_opt", + /* 258 */ "using_opt", + /* 259 */ "exprlist", + /* 260 */ "xfullname", + /* 261 */ "idlist", + /* 262 */ "nulls", + /* 263 */ "with", + /* 264 */ "where_opt_ret", + /* 265 */ "setlist", + /* 266 */ "insert_cmd", + /* 267 */ "idlist_opt", + /* 268 */ "upsert", + /* 269 */ "returning", + /* 270 */ "filter_over", + /* 271 */ "likeop", + /* 272 */ "between_op", + /* 273 */ "in_op", + /* 274 */ "paren_exprlist", + /* 275 */ "case_operand", + /* 276 */ "case_exprlist", + /* 277 */ "case_else", + /* 278 */ "uniqueflag", + /* 279 */ "collate", + /* 280 */ "vinto", + /* 281 */ "nmnum", + /* 282 */ "trigger_decl", + /* 283 */ "trigger_cmd_list", + /* 284 */ "trigger_time", + /* 285 */ "trigger_event", + /* 286 */ "foreach_clause", + /* 287 */ "when_clause", + /* 288 */ "trigger_cmd", + /* 289 */ "trnm", + /* 290 */ "tridxby", + /* 291 */ "database_kw_opt", + /* 292 */ "key_opt", + /* 293 */ "add_column_fullname", + /* 294 */ "kwcolumn_opt", + /* 295 */ "create_vtab", + /* 296 */ "vtabarglist", + /* 297 */ "vtabarg", + /* 298 */ "vtabargtoken", + /* 299 */ "lp", + /* 300 */ "anylist", + /* 301 */ "wqitem", + /* 302 */ "wqas", + /* 303 */ "windowdefn_list", + /* 304 */ "windowdefn", + /* 305 */ "window", + /* 306 */ "frame_opt", + /* 307 */ "part_opt", + /* 308 */ "filter_clause", + /* 309 */ "over_clause", + /* 310 */ "range_or_rows", + /* 311 */ "frame_bound", + /* 312 */ "frame_bound_s", + /* 313 */ "frame_bound_e", + /* 314 */ "frame_exclude_opt", + /* 315 */ "frame_exclude", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -156733,243 +158920,256 @@ static const char *const yyRuleName[] = { /* 145 */ "limit_opt ::= LIMIT expr", /* 146 */ "limit_opt ::= LIMIT expr OFFSET expr", /* 147 */ "limit_opt ::= LIMIT expr COMMA expr", - /* 148 */ "cmd ::= with DELETE FROM xfullname indexed_opt where_opt", + /* 148 */ "cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret", /* 149 */ "where_opt ::=", /* 150 */ "where_opt ::= WHERE expr", - /* 151 */ "cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt", - /* 152 */ "setlist ::= setlist COMMA nm EQ expr", - /* 153 */ "setlist ::= setlist COMMA LP idlist RP EQ expr", - /* 154 */ "setlist ::= nm EQ expr", - /* 155 */ "setlist ::= LP idlist RP EQ expr", - /* 156 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert", - /* 157 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES", - /* 158 */ "upsert ::=", - /* 159 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt", - /* 160 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING", - /* 161 */ "upsert ::= ON CONFLICT DO NOTHING", - /* 162 */ "insert_cmd ::= INSERT orconf", - /* 163 */ "insert_cmd ::= REPLACE", - /* 164 */ "idlist_opt ::=", - /* 165 */ "idlist_opt ::= LP idlist RP", - /* 166 */ "idlist ::= idlist COMMA nm", - 
/* 167 */ "idlist ::= nm", - /* 168 */ "expr ::= LP expr RP", - /* 169 */ "expr ::= ID|INDEXED", - /* 170 */ "expr ::= JOIN_KW", - /* 171 */ "expr ::= nm DOT nm", - /* 172 */ "expr ::= nm DOT nm DOT nm", - /* 173 */ "term ::= NULL|FLOAT|BLOB", - /* 174 */ "term ::= STRING", - /* 175 */ "term ::= INTEGER", - /* 176 */ "expr ::= VARIABLE", - /* 177 */ "expr ::= expr COLLATE ID|STRING", - /* 178 */ "expr ::= CAST LP expr AS typetoken RP", - /* 179 */ "expr ::= ID|INDEXED LP distinct exprlist RP", - /* 180 */ "expr ::= ID|INDEXED LP STAR RP", - /* 181 */ "expr ::= ID|INDEXED LP distinct exprlist RP filter_over", - /* 182 */ "expr ::= ID|INDEXED LP STAR RP filter_over", - /* 183 */ "term ::= CTIME_KW", - /* 184 */ "expr ::= LP nexprlist COMMA expr RP", - /* 185 */ "expr ::= expr AND expr", - /* 186 */ "expr ::= expr OR expr", - /* 187 */ "expr ::= expr LT|GT|GE|LE expr", - /* 188 */ "expr ::= expr EQ|NE expr", - /* 189 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr", - /* 190 */ "expr ::= expr PLUS|MINUS expr", - /* 191 */ "expr ::= expr STAR|SLASH|REM expr", - /* 192 */ "expr ::= expr CONCAT expr", - /* 193 */ "likeop ::= NOT LIKE_KW|MATCH", - /* 194 */ "expr ::= expr likeop expr", - /* 195 */ "expr ::= expr likeop expr ESCAPE expr", - /* 196 */ "expr ::= expr ISNULL|NOTNULL", - /* 197 */ "expr ::= expr NOT NULL", - /* 198 */ "expr ::= expr IS expr", - /* 199 */ "expr ::= expr IS NOT expr", - /* 200 */ "expr ::= NOT expr", - /* 201 */ "expr ::= BITNOT expr", - /* 202 */ "expr ::= PLUS|MINUS expr", - /* 203 */ "between_op ::= BETWEEN", - /* 204 */ "between_op ::= NOT BETWEEN", - /* 205 */ "expr ::= expr between_op expr AND expr", - /* 206 */ "in_op ::= IN", - /* 207 */ "in_op ::= NOT IN", - /* 208 */ "expr ::= expr in_op LP exprlist RP", - /* 209 */ "expr ::= LP select RP", - /* 210 */ "expr ::= expr in_op LP select RP", - /* 211 */ "expr ::= expr in_op nm dbnm paren_exprlist", - /* 212 */ "expr ::= EXISTS LP select RP", - /* 213 */ "expr ::= CASE case_operand case_exprlist case_else END", - /* 214 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", - /* 215 */ "case_exprlist ::= WHEN expr THEN expr", - /* 216 */ "case_else ::= ELSE expr", - /* 217 */ "case_else ::=", - /* 218 */ "case_operand ::= expr", - /* 219 */ "case_operand ::=", - /* 220 */ "exprlist ::=", - /* 221 */ "nexprlist ::= nexprlist COMMA expr", - /* 222 */ "nexprlist ::= expr", - /* 223 */ "paren_exprlist ::=", - /* 224 */ "paren_exprlist ::= LP exprlist RP", - /* 225 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt", - /* 226 */ "uniqueflag ::= UNIQUE", - /* 227 */ "uniqueflag ::=", - /* 228 */ "eidlist_opt ::=", - /* 229 */ "eidlist_opt ::= LP eidlist RP", - /* 230 */ "eidlist ::= eidlist COMMA nm collate sortorder", - /* 231 */ "eidlist ::= nm collate sortorder", - /* 232 */ "collate ::=", - /* 233 */ "collate ::= COLLATE ID|STRING", - /* 234 */ "cmd ::= DROP INDEX ifexists fullname", - /* 235 */ "cmd ::= VACUUM vinto", - /* 236 */ "cmd ::= VACUUM nm vinto", - /* 237 */ "vinto ::= INTO expr", - /* 238 */ "vinto ::=", - /* 239 */ "cmd ::= PRAGMA nm dbnm", - /* 240 */ "cmd ::= PRAGMA nm dbnm EQ nmnum", - /* 241 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP", - /* 242 */ "cmd ::= PRAGMA nm dbnm EQ minus_num", - /* 243 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP", - /* 244 */ "plus_num ::= PLUS INTEGER|FLOAT", - /* 245 */ "minus_num ::= MINUS INTEGER|FLOAT", - /* 246 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END", - /* 247 */ "trigger_decl ::= temp TRIGGER ifnotexists nm 
dbnm trigger_time trigger_event ON fullname foreach_clause when_clause", - /* 248 */ "trigger_time ::= BEFORE|AFTER", - /* 249 */ "trigger_time ::= INSTEAD OF", - /* 250 */ "trigger_time ::=", - /* 251 */ "trigger_event ::= DELETE|INSERT", - /* 252 */ "trigger_event ::= UPDATE", - /* 253 */ "trigger_event ::= UPDATE OF idlist", - /* 254 */ "when_clause ::=", - /* 255 */ "when_clause ::= WHEN expr", - /* 256 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI", - /* 257 */ "trigger_cmd_list ::= trigger_cmd SEMI", - /* 258 */ "trnm ::= nm DOT nm", - /* 259 */ "tridxby ::= INDEXED BY nm", - /* 260 */ "tridxby ::= NOT INDEXED", - /* 261 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt", - /* 262 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt", - /* 263 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt", - /* 264 */ "trigger_cmd ::= scanpt select scanpt", - /* 265 */ "expr ::= RAISE LP IGNORE RP", - /* 266 */ "expr ::= RAISE LP raisetype COMMA nm RP", - /* 267 */ "raisetype ::= ROLLBACK", - /* 268 */ "raisetype ::= ABORT", - /* 269 */ "raisetype ::= FAIL", - /* 270 */ "cmd ::= DROP TRIGGER ifexists fullname", - /* 271 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt", - /* 272 */ "cmd ::= DETACH database_kw_opt expr", - /* 273 */ "key_opt ::=", - /* 274 */ "key_opt ::= KEY expr", - /* 275 */ "cmd ::= REINDEX", - /* 276 */ "cmd ::= REINDEX nm dbnm", - /* 277 */ "cmd ::= ANALYZE", - /* 278 */ "cmd ::= ANALYZE nm dbnm", - /* 279 */ "cmd ::= ALTER TABLE fullname RENAME TO nm", - /* 280 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist", - /* 281 */ "add_column_fullname ::= fullname", - /* 282 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm", - /* 283 */ "cmd ::= create_vtab", - /* 284 */ "cmd ::= create_vtab LP vtabarglist RP", - /* 285 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm", - /* 286 */ "vtabarg ::=", - /* 287 */ "vtabargtoken ::= ANY", - /* 288 */ "vtabargtoken ::= lp anylist RP", - /* 289 */ "lp ::= LP", - /* 290 */ "with ::= WITH wqlist", - /* 291 */ "with ::= WITH RECURSIVE wqlist", - /* 292 */ "wqlist ::= nm eidlist_opt AS LP select RP", - /* 293 */ "wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP", - /* 294 */ "windowdefn_list ::= windowdefn", - /* 295 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn", - /* 296 */ "windowdefn ::= nm AS LP window RP", - /* 297 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt", - /* 298 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt", - /* 299 */ "window ::= ORDER BY sortlist frame_opt", - /* 300 */ "window ::= nm ORDER BY sortlist frame_opt", - /* 301 */ "window ::= frame_opt", - /* 302 */ "window ::= nm frame_opt", - /* 303 */ "frame_opt ::=", - /* 304 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt", - /* 305 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt", - /* 306 */ "range_or_rows ::= RANGE|ROWS|GROUPS", - /* 307 */ "frame_bound_s ::= frame_bound", - /* 308 */ "frame_bound_s ::= UNBOUNDED PRECEDING", - /* 309 */ "frame_bound_e ::= frame_bound", - /* 310 */ "frame_bound_e ::= UNBOUNDED FOLLOWING", - /* 311 */ "frame_bound ::= expr PRECEDING|FOLLOWING", - /* 312 */ "frame_bound ::= CURRENT ROW", - /* 313 */ "frame_exclude_opt ::=", - /* 314 */ "frame_exclude_opt ::= EXCLUDE frame_exclude", - /* 315 */ "frame_exclude ::= NO OTHERS", - /* 316 */ "frame_exclude ::= CURRENT ROW", - /* 317 */ 
"frame_exclude ::= GROUP|TIES", - /* 318 */ "window_clause ::= WINDOW windowdefn_list", - /* 319 */ "filter_over ::= filter_clause over_clause", - /* 320 */ "filter_over ::= over_clause", - /* 321 */ "filter_over ::= filter_clause", - /* 322 */ "over_clause ::= OVER LP window RP", - /* 323 */ "over_clause ::= OVER nm", - /* 324 */ "filter_clause ::= FILTER LP WHERE expr RP", - /* 325 */ "input ::= cmdlist", - /* 326 */ "cmdlist ::= cmdlist ecmd", - /* 327 */ "cmdlist ::= ecmd", - /* 328 */ "ecmd ::= SEMI", - /* 329 */ "ecmd ::= cmdx SEMI", - /* 330 */ "ecmd ::= explain cmdx SEMI", - /* 331 */ "trans_opt ::=", - /* 332 */ "trans_opt ::= TRANSACTION", - /* 333 */ "trans_opt ::= TRANSACTION nm", - /* 334 */ "savepoint_opt ::= SAVEPOINT", - /* 335 */ "savepoint_opt ::=", - /* 336 */ "cmd ::= create_table create_table_args", - /* 337 */ "columnlist ::= columnlist COMMA columnname carglist", - /* 338 */ "columnlist ::= columnname carglist", - /* 339 */ "nm ::= ID|INDEXED", - /* 340 */ "nm ::= STRING", - /* 341 */ "nm ::= JOIN_KW", - /* 342 */ "typetoken ::= typename", - /* 343 */ "typename ::= ID|STRING", - /* 344 */ "signed ::= plus_num", - /* 345 */ "signed ::= minus_num", - /* 346 */ "carglist ::= carglist ccons", - /* 347 */ "carglist ::=", - /* 348 */ "ccons ::= NULL onconf", - /* 349 */ "ccons ::= GENERATED ALWAYS AS generated", - /* 350 */ "ccons ::= AS generated", - /* 351 */ "conslist_opt ::= COMMA conslist", - /* 352 */ "conslist ::= conslist tconscomma tcons", - /* 353 */ "conslist ::= tcons", - /* 354 */ "tconscomma ::=", - /* 355 */ "defer_subclause_opt ::= defer_subclause", - /* 356 */ "resolvetype ::= raisetype", - /* 357 */ "selectnowith ::= oneselect", - /* 358 */ "oneselect ::= values", - /* 359 */ "sclp ::= selcollist COMMA", - /* 360 */ "as ::= ID|STRING", - /* 361 */ "expr ::= term", - /* 362 */ "likeop ::= LIKE_KW|MATCH", - /* 363 */ "exprlist ::= nexprlist", - /* 364 */ "nmnum ::= plus_num", - /* 365 */ "nmnum ::= nm", - /* 366 */ "nmnum ::= ON", - /* 367 */ "nmnum ::= DELETE", - /* 368 */ "nmnum ::= DEFAULT", - /* 369 */ "plus_num ::= INTEGER|FLOAT", - /* 370 */ "foreach_clause ::=", - /* 371 */ "foreach_clause ::= FOR EACH ROW", - /* 372 */ "trnm ::= nm", - /* 373 */ "tridxby ::=", - /* 374 */ "database_kw_opt ::= DATABASE", - /* 375 */ "database_kw_opt ::=", - /* 376 */ "kwcolumn_opt ::=", - /* 377 */ "kwcolumn_opt ::= COLUMNKW", - /* 378 */ "vtabarglist ::= vtabarg", - /* 379 */ "vtabarglist ::= vtabarglist COMMA vtabarg", - /* 380 */ "vtabarg ::= vtabarg vtabargtoken", - /* 381 */ "anylist ::=", - /* 382 */ "anylist ::= anylist LP anylist RP", - /* 383 */ "anylist ::= anylist ANY", - /* 384 */ "with ::=", + /* 151 */ "where_opt_ret ::=", + /* 152 */ "where_opt_ret ::= WHERE expr", + /* 153 */ "where_opt_ret ::= RETURNING selcollist", + /* 154 */ "where_opt_ret ::= WHERE expr RETURNING selcollist", + /* 155 */ "cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret", + /* 156 */ "setlist ::= setlist COMMA nm EQ expr", + /* 157 */ "setlist ::= setlist COMMA LP idlist RP EQ expr", + /* 158 */ "setlist ::= nm EQ expr", + /* 159 */ "setlist ::= LP idlist RP EQ expr", + /* 160 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert", + /* 161 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning", + /* 162 */ "upsert ::=", + /* 163 */ "upsert ::= RETURNING selcollist", + /* 164 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert", + /* 165 */ "upsert ::= ON CONFLICT LP 
sortlist RP where_opt DO NOTHING upsert", + /* 166 */ "upsert ::= ON CONFLICT DO NOTHING returning", + /* 167 */ "upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning", + /* 168 */ "returning ::= RETURNING selcollist", + /* 169 */ "insert_cmd ::= INSERT orconf", + /* 170 */ "insert_cmd ::= REPLACE", + /* 171 */ "idlist_opt ::=", + /* 172 */ "idlist_opt ::= LP idlist RP", + /* 173 */ "idlist ::= idlist COMMA nm", + /* 174 */ "idlist ::= nm", + /* 175 */ "expr ::= LP expr RP", + /* 176 */ "expr ::= ID|INDEXED", + /* 177 */ "expr ::= JOIN_KW", + /* 178 */ "expr ::= nm DOT nm", + /* 179 */ "expr ::= nm DOT nm DOT nm", + /* 180 */ "term ::= NULL|FLOAT|BLOB", + /* 181 */ "term ::= STRING", + /* 182 */ "term ::= INTEGER", + /* 183 */ "expr ::= VARIABLE", + /* 184 */ "expr ::= expr COLLATE ID|STRING", + /* 185 */ "expr ::= CAST LP expr AS typetoken RP", + /* 186 */ "expr ::= ID|INDEXED LP distinct exprlist RP", + /* 187 */ "expr ::= ID|INDEXED LP STAR RP", + /* 188 */ "expr ::= ID|INDEXED LP distinct exprlist RP filter_over", + /* 189 */ "expr ::= ID|INDEXED LP STAR RP filter_over", + /* 190 */ "term ::= CTIME_KW", + /* 191 */ "expr ::= LP nexprlist COMMA expr RP", + /* 192 */ "expr ::= expr AND expr", + /* 193 */ "expr ::= expr OR expr", + /* 194 */ "expr ::= expr LT|GT|GE|LE expr", + /* 195 */ "expr ::= expr EQ|NE expr", + /* 196 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr", + /* 197 */ "expr ::= expr PLUS|MINUS expr", + /* 198 */ "expr ::= expr STAR|SLASH|REM expr", + /* 199 */ "expr ::= expr CONCAT expr", + /* 200 */ "likeop ::= NOT LIKE_KW|MATCH", + /* 201 */ "expr ::= expr likeop expr", + /* 202 */ "expr ::= expr likeop expr ESCAPE expr", + /* 203 */ "expr ::= expr ISNULL|NOTNULL", + /* 204 */ "expr ::= expr NOT NULL", + /* 205 */ "expr ::= expr IS expr", + /* 206 */ "expr ::= expr IS NOT expr", + /* 207 */ "expr ::= NOT expr", + /* 208 */ "expr ::= BITNOT expr", + /* 209 */ "expr ::= PLUS|MINUS expr", + /* 210 */ "between_op ::= BETWEEN", + /* 211 */ "between_op ::= NOT BETWEEN", + /* 212 */ "expr ::= expr between_op expr AND expr", + /* 213 */ "in_op ::= IN", + /* 214 */ "in_op ::= NOT IN", + /* 215 */ "expr ::= expr in_op LP exprlist RP", + /* 216 */ "expr ::= LP select RP", + /* 217 */ "expr ::= expr in_op LP select RP", + /* 218 */ "expr ::= expr in_op nm dbnm paren_exprlist", + /* 219 */ "expr ::= EXISTS LP select RP", + /* 220 */ "expr ::= CASE case_operand case_exprlist case_else END", + /* 221 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", + /* 222 */ "case_exprlist ::= WHEN expr THEN expr", + /* 223 */ "case_else ::= ELSE expr", + /* 224 */ "case_else ::=", + /* 225 */ "case_operand ::= expr", + /* 226 */ "case_operand ::=", + /* 227 */ "exprlist ::=", + /* 228 */ "nexprlist ::= nexprlist COMMA expr", + /* 229 */ "nexprlist ::= expr", + /* 230 */ "paren_exprlist ::=", + /* 231 */ "paren_exprlist ::= LP exprlist RP", + /* 232 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt", + /* 233 */ "uniqueflag ::= UNIQUE", + /* 234 */ "uniqueflag ::=", + /* 235 */ "eidlist_opt ::=", + /* 236 */ "eidlist_opt ::= LP eidlist RP", + /* 237 */ "eidlist ::= eidlist COMMA nm collate sortorder", + /* 238 */ "eidlist ::= nm collate sortorder", + /* 239 */ "collate ::=", + /* 240 */ "collate ::= COLLATE ID|STRING", + /* 241 */ "cmd ::= DROP INDEX ifexists fullname", + /* 242 */ "cmd ::= VACUUM vinto", + /* 243 */ "cmd ::= VACUUM nm vinto", + /* 244 */ "vinto ::= INTO expr", + /* 245 */ "vinto ::=", + /* 246 */ "cmd ::= PRAGMA nm dbnm", 
+ /* 247 */ "cmd ::= PRAGMA nm dbnm EQ nmnum", + /* 248 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP", + /* 249 */ "cmd ::= PRAGMA nm dbnm EQ minus_num", + /* 250 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP", + /* 251 */ "plus_num ::= PLUS INTEGER|FLOAT", + /* 252 */ "minus_num ::= MINUS INTEGER|FLOAT", + /* 253 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END", + /* 254 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause", + /* 255 */ "trigger_time ::= BEFORE|AFTER", + /* 256 */ "trigger_time ::= INSTEAD OF", + /* 257 */ "trigger_time ::=", + /* 258 */ "trigger_event ::= DELETE|INSERT", + /* 259 */ "trigger_event ::= UPDATE", + /* 260 */ "trigger_event ::= UPDATE OF idlist", + /* 261 */ "when_clause ::=", + /* 262 */ "when_clause ::= WHEN expr", + /* 263 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI", + /* 264 */ "trigger_cmd_list ::= trigger_cmd SEMI", + /* 265 */ "trnm ::= nm DOT nm", + /* 266 */ "tridxby ::= INDEXED BY nm", + /* 267 */ "tridxby ::= NOT INDEXED", + /* 268 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt", + /* 269 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt", + /* 270 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt", + /* 271 */ "trigger_cmd ::= scanpt select scanpt", + /* 272 */ "expr ::= RAISE LP IGNORE RP", + /* 273 */ "expr ::= RAISE LP raisetype COMMA nm RP", + /* 274 */ "raisetype ::= ROLLBACK", + /* 275 */ "raisetype ::= ABORT", + /* 276 */ "raisetype ::= FAIL", + /* 277 */ "cmd ::= DROP TRIGGER ifexists fullname", + /* 278 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt", + /* 279 */ "cmd ::= DETACH database_kw_opt expr", + /* 280 */ "key_opt ::=", + /* 281 */ "key_opt ::= KEY expr", + /* 282 */ "cmd ::= REINDEX", + /* 283 */ "cmd ::= REINDEX nm dbnm", + /* 284 */ "cmd ::= ANALYZE", + /* 285 */ "cmd ::= ANALYZE nm dbnm", + /* 286 */ "cmd ::= ALTER TABLE fullname RENAME TO nm", + /* 287 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist", + /* 288 */ "cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm", + /* 289 */ "add_column_fullname ::= fullname", + /* 290 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm", + /* 291 */ "cmd ::= create_vtab", + /* 292 */ "cmd ::= create_vtab LP vtabarglist RP", + /* 293 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm", + /* 294 */ "vtabarg ::=", + /* 295 */ "vtabargtoken ::= ANY", + /* 296 */ "vtabargtoken ::= lp anylist RP", + /* 297 */ "lp ::= LP", + /* 298 */ "with ::= WITH wqlist", + /* 299 */ "with ::= WITH RECURSIVE wqlist", + /* 300 */ "wqas ::= AS", + /* 301 */ "wqas ::= AS MATERIALIZED", + /* 302 */ "wqas ::= AS NOT MATERIALIZED", + /* 303 */ "wqitem ::= nm eidlist_opt wqas LP select RP", + /* 304 */ "wqlist ::= wqitem", + /* 305 */ "wqlist ::= wqlist COMMA wqitem", + /* 306 */ "windowdefn_list ::= windowdefn", + /* 307 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn", + /* 308 */ "windowdefn ::= nm AS LP window RP", + /* 309 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt", + /* 310 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt", + /* 311 */ "window ::= ORDER BY sortlist frame_opt", + /* 312 */ "window ::= nm ORDER BY sortlist frame_opt", + /* 313 */ "window ::= frame_opt", + /* 314 */ "window ::= nm frame_opt", + /* 315 */ "frame_opt ::=", + /* 316 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt", + /* 317 */ "frame_opt ::= 
range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt", + /* 318 */ "range_or_rows ::= RANGE|ROWS|GROUPS", + /* 319 */ "frame_bound_s ::= frame_bound", + /* 320 */ "frame_bound_s ::= UNBOUNDED PRECEDING", + /* 321 */ "frame_bound_e ::= frame_bound", + /* 322 */ "frame_bound_e ::= UNBOUNDED FOLLOWING", + /* 323 */ "frame_bound ::= expr PRECEDING|FOLLOWING", + /* 324 */ "frame_bound ::= CURRENT ROW", + /* 325 */ "frame_exclude_opt ::=", + /* 326 */ "frame_exclude_opt ::= EXCLUDE frame_exclude", + /* 327 */ "frame_exclude ::= NO OTHERS", + /* 328 */ "frame_exclude ::= CURRENT ROW", + /* 329 */ "frame_exclude ::= GROUP|TIES", + /* 330 */ "window_clause ::= WINDOW windowdefn_list", + /* 331 */ "filter_over ::= filter_clause over_clause", + /* 332 */ "filter_over ::= over_clause", + /* 333 */ "filter_over ::= filter_clause", + /* 334 */ "over_clause ::= OVER LP window RP", + /* 335 */ "over_clause ::= OVER nm", + /* 336 */ "filter_clause ::= FILTER LP WHERE expr RP", + /* 337 */ "input ::= cmdlist", + /* 338 */ "cmdlist ::= cmdlist ecmd", + /* 339 */ "cmdlist ::= ecmd", + /* 340 */ "ecmd ::= SEMI", + /* 341 */ "ecmd ::= cmdx SEMI", + /* 342 */ "ecmd ::= explain cmdx SEMI", + /* 343 */ "trans_opt ::=", + /* 344 */ "trans_opt ::= TRANSACTION", + /* 345 */ "trans_opt ::= TRANSACTION nm", + /* 346 */ "savepoint_opt ::= SAVEPOINT", + /* 347 */ "savepoint_opt ::=", + /* 348 */ "cmd ::= create_table create_table_args", + /* 349 */ "columnlist ::= columnlist COMMA columnname carglist", + /* 350 */ "columnlist ::= columnname carglist", + /* 351 */ "nm ::= ID|INDEXED", + /* 352 */ "nm ::= STRING", + /* 353 */ "nm ::= JOIN_KW", + /* 354 */ "typetoken ::= typename", + /* 355 */ "typename ::= ID|STRING", + /* 356 */ "signed ::= plus_num", + /* 357 */ "signed ::= minus_num", + /* 358 */ "carglist ::= carglist ccons", + /* 359 */ "carglist ::=", + /* 360 */ "ccons ::= NULL onconf", + /* 361 */ "ccons ::= GENERATED ALWAYS AS generated", + /* 362 */ "ccons ::= AS generated", + /* 363 */ "conslist_opt ::= COMMA conslist", + /* 364 */ "conslist ::= conslist tconscomma tcons", + /* 365 */ "conslist ::= tcons", + /* 366 */ "tconscomma ::=", + /* 367 */ "defer_subclause_opt ::= defer_subclause", + /* 368 */ "resolvetype ::= raisetype", + /* 369 */ "selectnowith ::= oneselect", + /* 370 */ "oneselect ::= values", + /* 371 */ "sclp ::= selcollist COMMA", + /* 372 */ "as ::= ID|STRING", + /* 373 */ "returning ::=", + /* 374 */ "expr ::= term", + /* 375 */ "likeop ::= LIKE_KW|MATCH", + /* 376 */ "exprlist ::= nexprlist", + /* 377 */ "nmnum ::= plus_num", + /* 378 */ "nmnum ::= nm", + /* 379 */ "nmnum ::= ON", + /* 380 */ "nmnum ::= DELETE", + /* 381 */ "nmnum ::= DEFAULT", + /* 382 */ "plus_num ::= INTEGER|FLOAT", + /* 383 */ "foreach_clause ::=", + /* 384 */ "foreach_clause ::= FOR EACH ROW", + /* 385 */ "trnm ::= nm", + /* 386 */ "tridxby ::=", + /* 387 */ "database_kw_opt ::= DATABASE", + /* 388 */ "database_kw_opt ::=", + /* 389 */ "kwcolumn_opt ::=", + /* 390 */ "kwcolumn_opt ::= COLUMNKW", + /* 391 */ "vtabarglist ::= vtabarg", + /* 392 */ "vtabarglist ::= vtabarglist COMMA vtabarg", + /* 393 */ "vtabarg ::= vtabarg vtabargtoken", + /* 394 */ "anylist ::=", + /* 395 */ "anylist ::= anylist LP anylist RP", + /* 396 */ "anylist ::= anylist ANY", + /* 397 */ "with ::=", }; #endif /* NDEBUG */ @@ -157095,98 +159295,99 @@ static void yy_destructor( ** inside the C code. 
*/ /********* Begin destructor definitions ***************************************/ - case 200: /* select */ - case 234: /* selectnowith */ - case 235: /* oneselect */ - case 247: /* values */ + case 202: /* select */ + case 236: /* selectnowith */ + case 237: /* oneselect */ + case 249: /* values */ { -sqlite3SelectDelete(pParse->db, (yypminor->yy539)); +sqlite3SelectDelete(pParse->db, (yypminor->yy307)); } break; - case 211: /* term */ - case 212: /* expr */ - case 241: /* where_opt */ - case 243: /* having_opt */ - case 255: /* on_opt */ - case 271: /* case_operand */ - case 273: /* case_else */ - case 276: /* vinto */ - case 283: /* when_clause */ - case 288: /* key_opt */ - case 302: /* filter_clause */ + case 213: /* term */ + case 214: /* expr */ + case 243: /* where_opt */ + case 245: /* having_opt */ + case 257: /* on_opt */ + case 264: /* where_opt_ret */ + case 275: /* case_operand */ + case 277: /* case_else */ + case 280: /* vinto */ + case 287: /* when_clause */ + case 292: /* key_opt */ + case 308: /* filter_clause */ { -sqlite3ExprDelete(pParse->db, (yypminor->yy202)); +sqlite3ExprDelete(pParse->db, (yypminor->yy602)); } break; - case 216: /* eidlist_opt */ - case 226: /* sortlist */ - case 227: /* eidlist */ - case 239: /* selcollist */ - case 242: /* groupby_opt */ - case 244: /* orderby_opt */ - case 248: /* nexprlist */ - case 249: /* sclp */ - case 257: /* exprlist */ - case 262: /* setlist */ - case 270: /* paren_exprlist */ - case 272: /* case_exprlist */ - case 301: /* part_opt */ + case 218: /* eidlist_opt */ + case 228: /* sortlist */ + case 229: /* eidlist */ + case 241: /* selcollist */ + case 244: /* groupby_opt */ + case 246: /* orderby_opt */ + case 250: /* nexprlist */ + case 251: /* sclp */ + case 259: /* exprlist */ + case 265: /* setlist */ + case 274: /* paren_exprlist */ + case 276: /* case_exprlist */ + case 307: /* part_opt */ { -sqlite3ExprListDelete(pParse->db, (yypminor->yy242)); +sqlite3ExprListDelete(pParse->db, (yypminor->yy338)); } break; - case 233: /* fullname */ - case 240: /* from */ - case 251: /* seltablist */ - case 252: /* stl_prefix */ - case 258: /* xfullname */ + case 235: /* fullname */ + case 242: /* from */ + case 253: /* seltablist */ + case 254: /* stl_prefix */ + case 260: /* xfullname */ { -sqlite3SrcListDelete(pParse->db, (yypminor->yy47)); +sqlite3SrcListDelete(pParse->db, (yypminor->yy291)); } break; - case 236: /* wqlist */ + case 238: /* wqlist */ { -sqlite3WithDelete(pParse->db, (yypminor->yy131)); +sqlite3WithDelete(pParse->db, (yypminor->yy195)); } break; - case 246: /* window_clause */ - case 297: /* windowdefn_list */ + case 248: /* window_clause */ + case 303: /* windowdefn_list */ { -sqlite3WindowListDelete(pParse->db, (yypminor->yy303)); +sqlite3WindowListDelete(pParse->db, (yypminor->yy19)); } break; - case 256: /* using_opt */ - case 259: /* idlist */ - case 264: /* idlist_opt */ + case 258: /* using_opt */ + case 261: /* idlist */ + case 267: /* idlist_opt */ { -sqlite3IdListDelete(pParse->db, (yypminor->yy600)); +sqlite3IdListDelete(pParse->db, (yypminor->yy288)); } break; - case 266: /* filter_over */ - case 298: /* windowdefn */ - case 299: /* window */ - case 300: /* frame_opt */ - case 303: /* over_clause */ + case 270: /* filter_over */ + case 304: /* windowdefn */ + case 305: /* window */ + case 306: /* frame_opt */ + case 309: /* over_clause */ { -sqlite3WindowDelete(pParse->db, (yypminor->yy303)); +sqlite3WindowDelete(pParse->db, (yypminor->yy19)); } break; - case 279: /* trigger_cmd_list */ - case 
284: /* trigger_cmd */ + case 283: /* trigger_cmd_list */ + case 288: /* trigger_cmd */ { -sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy447)); +sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy483)); } break; - case 281: /* trigger_event */ + case 285: /* trigger_event */ { -sqlite3IdListDelete(pParse->db, (yypminor->yy230).b); +sqlite3IdListDelete(pParse->db, (yypminor->yy50).b); } break; - case 305: /* frame_bound */ - case 306: /* frame_bound_s */ - case 307: /* frame_bound_e */ + case 311: /* frame_bound */ + case 312: /* frame_bound_s */ + case 313: /* frame_bound_e */ { -sqlite3ExprDelete(pParse->db, (yypminor->yy77).pExpr); +sqlite3ExprDelete(pParse->db, (yypminor->yy113).pExpr); } break; /********* End destructor definitions *****************************************/ @@ -157477,391 +159678,404 @@ static void yy_shift( /* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side ** of that rule */ static const YYCODETYPE yyRuleInfoLhs[] = { - 185, /* (0) explain ::= EXPLAIN */ - 185, /* (1) explain ::= EXPLAIN QUERY PLAN */ - 184, /* (2) cmdx ::= cmd */ - 186, /* (3) cmd ::= BEGIN transtype trans_opt */ - 187, /* (4) transtype ::= */ - 187, /* (5) transtype ::= DEFERRED */ - 187, /* (6) transtype ::= IMMEDIATE */ - 187, /* (7) transtype ::= EXCLUSIVE */ - 186, /* (8) cmd ::= COMMIT|END trans_opt */ - 186, /* (9) cmd ::= ROLLBACK trans_opt */ - 186, /* (10) cmd ::= SAVEPOINT nm */ - 186, /* (11) cmd ::= RELEASE savepoint_opt nm */ - 186, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */ - 191, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */ - 193, /* (14) createkw ::= CREATE */ - 195, /* (15) ifnotexists ::= */ - 195, /* (16) ifnotexists ::= IF NOT EXISTS */ - 194, /* (17) temp ::= TEMP */ - 194, /* (18) temp ::= */ - 192, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_options */ - 192, /* (20) create_table_args ::= AS select */ - 199, /* (21) table_options ::= */ - 199, /* (22) table_options ::= WITHOUT nm */ - 201, /* (23) columnname ::= nm typetoken */ - 203, /* (24) typetoken ::= */ - 203, /* (25) typetoken ::= typename LP signed RP */ - 203, /* (26) typetoken ::= typename LP signed COMMA signed RP */ - 204, /* (27) typename ::= typename ID|STRING */ - 208, /* (28) scanpt ::= */ - 209, /* (29) scantok ::= */ - 210, /* (30) ccons ::= CONSTRAINT nm */ - 210, /* (31) ccons ::= DEFAULT scantok term */ - 210, /* (32) ccons ::= DEFAULT LP expr RP */ - 210, /* (33) ccons ::= DEFAULT PLUS scantok term */ - 210, /* (34) ccons ::= DEFAULT MINUS scantok term */ - 210, /* (35) ccons ::= DEFAULT scantok ID|INDEXED */ - 210, /* (36) ccons ::= NOT NULL onconf */ - 210, /* (37) ccons ::= PRIMARY KEY sortorder onconf autoinc */ - 210, /* (38) ccons ::= UNIQUE onconf */ - 210, /* (39) ccons ::= CHECK LP expr RP */ - 210, /* (40) ccons ::= REFERENCES nm eidlist_opt refargs */ - 210, /* (41) ccons ::= defer_subclause */ - 210, /* (42) ccons ::= COLLATE ID|STRING */ - 219, /* (43) generated ::= LP expr RP */ - 219, /* (44) generated ::= LP expr RP ID */ - 215, /* (45) autoinc ::= */ - 215, /* (46) autoinc ::= AUTOINCR */ - 217, /* (47) refargs ::= */ - 217, /* (48) refargs ::= refargs refarg */ - 220, /* (49) refarg ::= MATCH nm */ - 220, /* (50) refarg ::= ON INSERT refact */ - 220, /* (51) refarg ::= ON DELETE refact */ - 220, /* (52) refarg ::= ON UPDATE refact */ - 221, /* (53) refact ::= SET NULL */ - 221, /* (54) refact ::= SET DEFAULT */ - 221, /* (55) refact ::= CASCADE */ - 221, /* (56) refact ::= RESTRICT */ - 221, /* (57) 
refact ::= NO ACTION */ - 218, /* (58) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ - 218, /* (59) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ - 222, /* (60) init_deferred_pred_opt ::= */ - 222, /* (61) init_deferred_pred_opt ::= INITIALLY DEFERRED */ - 222, /* (62) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ - 198, /* (63) conslist_opt ::= */ - 224, /* (64) tconscomma ::= COMMA */ - 225, /* (65) tcons ::= CONSTRAINT nm */ - 225, /* (66) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ - 225, /* (67) tcons ::= UNIQUE LP sortlist RP onconf */ - 225, /* (68) tcons ::= CHECK LP expr RP onconf */ - 225, /* (69) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ - 228, /* (70) defer_subclause_opt ::= */ - 213, /* (71) onconf ::= */ - 213, /* (72) onconf ::= ON CONFLICT resolvetype */ - 229, /* (73) orconf ::= */ - 229, /* (74) orconf ::= OR resolvetype */ - 230, /* (75) resolvetype ::= IGNORE */ - 230, /* (76) resolvetype ::= REPLACE */ - 186, /* (77) cmd ::= DROP TABLE ifexists fullname */ - 232, /* (78) ifexists ::= IF EXISTS */ - 232, /* (79) ifexists ::= */ - 186, /* (80) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ - 186, /* (81) cmd ::= DROP VIEW ifexists fullname */ - 186, /* (82) cmd ::= select */ - 200, /* (83) select ::= WITH wqlist selectnowith */ - 200, /* (84) select ::= WITH RECURSIVE wqlist selectnowith */ - 200, /* (85) select ::= selectnowith */ - 234, /* (86) selectnowith ::= selectnowith multiselect_op oneselect */ - 237, /* (87) multiselect_op ::= UNION */ - 237, /* (88) multiselect_op ::= UNION ALL */ - 237, /* (89) multiselect_op ::= EXCEPT|INTERSECT */ - 235, /* (90) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ - 235, /* (91) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ - 247, /* (92) values ::= VALUES LP nexprlist RP */ - 247, /* (93) values ::= values COMMA LP nexprlist RP */ - 238, /* (94) distinct ::= DISTINCT */ - 238, /* (95) distinct ::= ALL */ - 238, /* (96) distinct ::= */ - 249, /* (97) sclp ::= */ - 239, /* (98) selcollist ::= sclp scanpt expr scanpt as */ - 239, /* (99) selcollist ::= sclp scanpt STAR */ - 239, /* (100) selcollist ::= sclp scanpt nm DOT STAR */ - 250, /* (101) as ::= AS nm */ - 250, /* (102) as ::= */ - 240, /* (103) from ::= */ - 240, /* (104) from ::= FROM seltablist */ - 252, /* (105) stl_prefix ::= seltablist joinop */ - 252, /* (106) stl_prefix ::= */ - 251, /* (107) seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */ - 251, /* (108) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */ - 251, /* (109) seltablist ::= stl_prefix LP select RP as on_opt using_opt */ - 251, /* (110) seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */ - 196, /* (111) dbnm ::= */ - 196, /* (112) dbnm ::= DOT nm */ - 233, /* (113) fullname ::= nm */ - 233, /* (114) fullname ::= nm DOT nm */ - 258, /* (115) xfullname ::= nm */ - 258, /* (116) xfullname ::= nm DOT nm */ - 258, /* (117) xfullname ::= nm DOT nm AS nm */ - 258, /* (118) xfullname ::= nm AS nm */ - 253, /* (119) joinop ::= COMMA|JOIN */ - 253, /* (120) joinop ::= JOIN_KW JOIN */ - 253, /* (121) joinop ::= JOIN_KW nm JOIN */ - 253, /* (122) joinop ::= JOIN_KW nm nm JOIN */ - 255, /* (123) on_opt ::= ON expr */ - 255, /* (124) on_opt ::= */ - 254, /* (125) indexed_opt ::= */ - 254, /* (126) indexed_opt ::= INDEXED BY 
nm */ - 254, /* (127) indexed_opt ::= NOT INDEXED */ - 256, /* (128) using_opt ::= USING LP idlist RP */ - 256, /* (129) using_opt ::= */ - 244, /* (130) orderby_opt ::= */ - 244, /* (131) orderby_opt ::= ORDER BY sortlist */ - 226, /* (132) sortlist ::= sortlist COMMA expr sortorder nulls */ - 226, /* (133) sortlist ::= expr sortorder nulls */ - 214, /* (134) sortorder ::= ASC */ - 214, /* (135) sortorder ::= DESC */ - 214, /* (136) sortorder ::= */ - 260, /* (137) nulls ::= NULLS FIRST */ - 260, /* (138) nulls ::= NULLS LAST */ - 260, /* (139) nulls ::= */ - 242, /* (140) groupby_opt ::= */ - 242, /* (141) groupby_opt ::= GROUP BY nexprlist */ - 243, /* (142) having_opt ::= */ - 243, /* (143) having_opt ::= HAVING expr */ - 245, /* (144) limit_opt ::= */ - 245, /* (145) limit_opt ::= LIMIT expr */ - 245, /* (146) limit_opt ::= LIMIT expr OFFSET expr */ - 245, /* (147) limit_opt ::= LIMIT expr COMMA expr */ - 186, /* (148) cmd ::= with DELETE FROM xfullname indexed_opt where_opt */ - 241, /* (149) where_opt ::= */ - 241, /* (150) where_opt ::= WHERE expr */ - 186, /* (151) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt */ - 262, /* (152) setlist ::= setlist COMMA nm EQ expr */ - 262, /* (153) setlist ::= setlist COMMA LP idlist RP EQ expr */ - 262, /* (154) setlist ::= nm EQ expr */ - 262, /* (155) setlist ::= LP idlist RP EQ expr */ - 186, /* (156) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ - 186, /* (157) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES */ - 265, /* (158) upsert ::= */ - 265, /* (159) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt */ - 265, /* (160) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING */ - 265, /* (161) upsert ::= ON CONFLICT DO NOTHING */ - 263, /* (162) insert_cmd ::= INSERT orconf */ - 263, /* (163) insert_cmd ::= REPLACE */ - 264, /* (164) idlist_opt ::= */ - 264, /* (165) idlist_opt ::= LP idlist RP */ - 259, /* (166) idlist ::= idlist COMMA nm */ - 259, /* (167) idlist ::= nm */ - 212, /* (168) expr ::= LP expr RP */ - 212, /* (169) expr ::= ID|INDEXED */ - 212, /* (170) expr ::= JOIN_KW */ - 212, /* (171) expr ::= nm DOT nm */ - 212, /* (172) expr ::= nm DOT nm DOT nm */ - 211, /* (173) term ::= NULL|FLOAT|BLOB */ - 211, /* (174) term ::= STRING */ - 211, /* (175) term ::= INTEGER */ - 212, /* (176) expr ::= VARIABLE */ - 212, /* (177) expr ::= expr COLLATE ID|STRING */ - 212, /* (178) expr ::= CAST LP expr AS typetoken RP */ - 212, /* (179) expr ::= ID|INDEXED LP distinct exprlist RP */ - 212, /* (180) expr ::= ID|INDEXED LP STAR RP */ - 212, /* (181) expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ - 212, /* (182) expr ::= ID|INDEXED LP STAR RP filter_over */ - 211, /* (183) term ::= CTIME_KW */ - 212, /* (184) expr ::= LP nexprlist COMMA expr RP */ - 212, /* (185) expr ::= expr AND expr */ - 212, /* (186) expr ::= expr OR expr */ - 212, /* (187) expr ::= expr LT|GT|GE|LE expr */ - 212, /* (188) expr ::= expr EQ|NE expr */ - 212, /* (189) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ - 212, /* (190) expr ::= expr PLUS|MINUS expr */ - 212, /* (191) expr ::= expr STAR|SLASH|REM expr */ - 212, /* (192) expr ::= expr CONCAT expr */ - 267, /* (193) likeop ::= NOT LIKE_KW|MATCH */ - 212, /* (194) expr ::= expr likeop expr */ - 212, /* (195) expr ::= expr likeop expr ESCAPE expr */ - 212, /* (196) expr ::= expr ISNULL|NOTNULL */ - 212, /* (197) expr ::= expr NOT NULL */ - 212, /* (198) expr ::= expr IS expr */ - 212, /* (199) expr 
::= expr IS NOT expr */ - 212, /* (200) expr ::= NOT expr */ - 212, /* (201) expr ::= BITNOT expr */ - 212, /* (202) expr ::= PLUS|MINUS expr */ - 268, /* (203) between_op ::= BETWEEN */ - 268, /* (204) between_op ::= NOT BETWEEN */ - 212, /* (205) expr ::= expr between_op expr AND expr */ - 269, /* (206) in_op ::= IN */ - 269, /* (207) in_op ::= NOT IN */ - 212, /* (208) expr ::= expr in_op LP exprlist RP */ - 212, /* (209) expr ::= LP select RP */ - 212, /* (210) expr ::= expr in_op LP select RP */ - 212, /* (211) expr ::= expr in_op nm dbnm paren_exprlist */ - 212, /* (212) expr ::= EXISTS LP select RP */ - 212, /* (213) expr ::= CASE case_operand case_exprlist case_else END */ - 272, /* (214) case_exprlist ::= case_exprlist WHEN expr THEN expr */ - 272, /* (215) case_exprlist ::= WHEN expr THEN expr */ - 273, /* (216) case_else ::= ELSE expr */ - 273, /* (217) case_else ::= */ - 271, /* (218) case_operand ::= expr */ - 271, /* (219) case_operand ::= */ - 257, /* (220) exprlist ::= */ - 248, /* (221) nexprlist ::= nexprlist COMMA expr */ - 248, /* (222) nexprlist ::= expr */ - 270, /* (223) paren_exprlist ::= */ - 270, /* (224) paren_exprlist ::= LP exprlist RP */ - 186, /* (225) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ - 274, /* (226) uniqueflag ::= UNIQUE */ - 274, /* (227) uniqueflag ::= */ - 216, /* (228) eidlist_opt ::= */ - 216, /* (229) eidlist_opt ::= LP eidlist RP */ - 227, /* (230) eidlist ::= eidlist COMMA nm collate sortorder */ - 227, /* (231) eidlist ::= nm collate sortorder */ - 275, /* (232) collate ::= */ - 275, /* (233) collate ::= COLLATE ID|STRING */ - 186, /* (234) cmd ::= DROP INDEX ifexists fullname */ - 186, /* (235) cmd ::= VACUUM vinto */ - 186, /* (236) cmd ::= VACUUM nm vinto */ - 276, /* (237) vinto ::= INTO expr */ - 276, /* (238) vinto ::= */ - 186, /* (239) cmd ::= PRAGMA nm dbnm */ - 186, /* (240) cmd ::= PRAGMA nm dbnm EQ nmnum */ - 186, /* (241) cmd ::= PRAGMA nm dbnm LP nmnum RP */ - 186, /* (242) cmd ::= PRAGMA nm dbnm EQ minus_num */ - 186, /* (243) cmd ::= PRAGMA nm dbnm LP minus_num RP */ - 206, /* (244) plus_num ::= PLUS INTEGER|FLOAT */ - 207, /* (245) minus_num ::= MINUS INTEGER|FLOAT */ - 186, /* (246) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ - 278, /* (247) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ - 280, /* (248) trigger_time ::= BEFORE|AFTER */ - 280, /* (249) trigger_time ::= INSTEAD OF */ - 280, /* (250) trigger_time ::= */ - 281, /* (251) trigger_event ::= DELETE|INSERT */ - 281, /* (252) trigger_event ::= UPDATE */ - 281, /* (253) trigger_event ::= UPDATE OF idlist */ - 283, /* (254) when_clause ::= */ - 283, /* (255) when_clause ::= WHEN expr */ - 279, /* (256) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ - 279, /* (257) trigger_cmd_list ::= trigger_cmd SEMI */ - 285, /* (258) trnm ::= nm DOT nm */ - 286, /* (259) tridxby ::= INDEXED BY nm */ - 286, /* (260) tridxby ::= NOT INDEXED */ - 284, /* (261) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ - 284, /* (262) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ - 284, /* (263) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ - 284, /* (264) trigger_cmd ::= scanpt select scanpt */ - 212, /* (265) expr ::= RAISE LP IGNORE RP */ - 212, /* (266) expr ::= RAISE LP raisetype COMMA nm RP */ - 231, /* (267) raisetype ::= ROLLBACK */ - 231, /* (268) raisetype ::= ABORT 
*/ - 231, /* (269) raisetype ::= FAIL */ - 186, /* (270) cmd ::= DROP TRIGGER ifexists fullname */ - 186, /* (271) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ - 186, /* (272) cmd ::= DETACH database_kw_opt expr */ - 288, /* (273) key_opt ::= */ - 288, /* (274) key_opt ::= KEY expr */ - 186, /* (275) cmd ::= REINDEX */ - 186, /* (276) cmd ::= REINDEX nm dbnm */ - 186, /* (277) cmd ::= ANALYZE */ - 186, /* (278) cmd ::= ANALYZE nm dbnm */ - 186, /* (279) cmd ::= ALTER TABLE fullname RENAME TO nm */ - 186, /* (280) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ - 289, /* (281) add_column_fullname ::= fullname */ - 186, /* (282) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ - 186, /* (283) cmd ::= create_vtab */ - 186, /* (284) cmd ::= create_vtab LP vtabarglist RP */ - 291, /* (285) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ - 293, /* (286) vtabarg ::= */ - 294, /* (287) vtabargtoken ::= ANY */ - 294, /* (288) vtabargtoken ::= lp anylist RP */ - 295, /* (289) lp ::= LP */ - 261, /* (290) with ::= WITH wqlist */ - 261, /* (291) with ::= WITH RECURSIVE wqlist */ - 236, /* (292) wqlist ::= nm eidlist_opt AS LP select RP */ - 236, /* (293) wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP */ - 297, /* (294) windowdefn_list ::= windowdefn */ - 297, /* (295) windowdefn_list ::= windowdefn_list COMMA windowdefn */ - 298, /* (296) windowdefn ::= nm AS LP window RP */ - 299, /* (297) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ - 299, /* (298) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ - 299, /* (299) window ::= ORDER BY sortlist frame_opt */ - 299, /* (300) window ::= nm ORDER BY sortlist frame_opt */ - 299, /* (301) window ::= frame_opt */ - 299, /* (302) window ::= nm frame_opt */ - 300, /* (303) frame_opt ::= */ - 300, /* (304) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ - 300, /* (305) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ - 304, /* (306) range_or_rows ::= RANGE|ROWS|GROUPS */ - 306, /* (307) frame_bound_s ::= frame_bound */ - 306, /* (308) frame_bound_s ::= UNBOUNDED PRECEDING */ - 307, /* (309) frame_bound_e ::= frame_bound */ - 307, /* (310) frame_bound_e ::= UNBOUNDED FOLLOWING */ - 305, /* (311) frame_bound ::= expr PRECEDING|FOLLOWING */ - 305, /* (312) frame_bound ::= CURRENT ROW */ - 308, /* (313) frame_exclude_opt ::= */ - 308, /* (314) frame_exclude_opt ::= EXCLUDE frame_exclude */ - 309, /* (315) frame_exclude ::= NO OTHERS */ - 309, /* (316) frame_exclude ::= CURRENT ROW */ - 309, /* (317) frame_exclude ::= GROUP|TIES */ - 246, /* (318) window_clause ::= WINDOW windowdefn_list */ - 266, /* (319) filter_over ::= filter_clause over_clause */ - 266, /* (320) filter_over ::= over_clause */ - 266, /* (321) filter_over ::= filter_clause */ - 303, /* (322) over_clause ::= OVER LP window RP */ - 303, /* (323) over_clause ::= OVER nm */ - 302, /* (324) filter_clause ::= FILTER LP WHERE expr RP */ - 181, /* (325) input ::= cmdlist */ - 182, /* (326) cmdlist ::= cmdlist ecmd */ - 182, /* (327) cmdlist ::= ecmd */ - 183, /* (328) ecmd ::= SEMI */ - 183, /* (329) ecmd ::= cmdx SEMI */ - 183, /* (330) ecmd ::= explain cmdx SEMI */ - 188, /* (331) trans_opt ::= */ - 188, /* (332) trans_opt ::= TRANSACTION */ - 188, /* (333) trans_opt ::= TRANSACTION nm */ - 190, /* (334) savepoint_opt ::= SAVEPOINT */ - 190, /* (335) savepoint_opt ::= */ - 186, /* (336) cmd ::= create_table create_table_args */ - 197, /* 
(337) columnlist ::= columnlist COMMA columnname carglist */ - 197, /* (338) columnlist ::= columnname carglist */ - 189, /* (339) nm ::= ID|INDEXED */ - 189, /* (340) nm ::= STRING */ - 189, /* (341) nm ::= JOIN_KW */ - 203, /* (342) typetoken ::= typename */ - 204, /* (343) typename ::= ID|STRING */ - 205, /* (344) signed ::= plus_num */ - 205, /* (345) signed ::= minus_num */ - 202, /* (346) carglist ::= carglist ccons */ - 202, /* (347) carglist ::= */ - 210, /* (348) ccons ::= NULL onconf */ - 210, /* (349) ccons ::= GENERATED ALWAYS AS generated */ - 210, /* (350) ccons ::= AS generated */ - 198, /* (351) conslist_opt ::= COMMA conslist */ - 223, /* (352) conslist ::= conslist tconscomma tcons */ - 223, /* (353) conslist ::= tcons */ - 224, /* (354) tconscomma ::= */ - 228, /* (355) defer_subclause_opt ::= defer_subclause */ - 230, /* (356) resolvetype ::= raisetype */ - 234, /* (357) selectnowith ::= oneselect */ - 235, /* (358) oneselect ::= values */ - 249, /* (359) sclp ::= selcollist COMMA */ - 250, /* (360) as ::= ID|STRING */ - 212, /* (361) expr ::= term */ - 267, /* (362) likeop ::= LIKE_KW|MATCH */ - 257, /* (363) exprlist ::= nexprlist */ - 277, /* (364) nmnum ::= plus_num */ - 277, /* (365) nmnum ::= nm */ - 277, /* (366) nmnum ::= ON */ - 277, /* (367) nmnum ::= DELETE */ - 277, /* (368) nmnum ::= DEFAULT */ - 206, /* (369) plus_num ::= INTEGER|FLOAT */ - 282, /* (370) foreach_clause ::= */ - 282, /* (371) foreach_clause ::= FOR EACH ROW */ - 285, /* (372) trnm ::= nm */ - 286, /* (373) tridxby ::= */ - 287, /* (374) database_kw_opt ::= DATABASE */ - 287, /* (375) database_kw_opt ::= */ - 290, /* (376) kwcolumn_opt ::= */ - 290, /* (377) kwcolumn_opt ::= COLUMNKW */ - 292, /* (378) vtabarglist ::= vtabarg */ - 292, /* (379) vtabarglist ::= vtabarglist COMMA vtabarg */ - 293, /* (380) vtabarg ::= vtabarg vtabargtoken */ - 296, /* (381) anylist ::= */ - 296, /* (382) anylist ::= anylist LP anylist RP */ - 296, /* (383) anylist ::= anylist ANY */ - 261, /* (384) with ::= */ + 187, /* (0) explain ::= EXPLAIN */ + 187, /* (1) explain ::= EXPLAIN QUERY PLAN */ + 186, /* (2) cmdx ::= cmd */ + 188, /* (3) cmd ::= BEGIN transtype trans_opt */ + 189, /* (4) transtype ::= */ + 189, /* (5) transtype ::= DEFERRED */ + 189, /* (6) transtype ::= IMMEDIATE */ + 189, /* (7) transtype ::= EXCLUSIVE */ + 188, /* (8) cmd ::= COMMIT|END trans_opt */ + 188, /* (9) cmd ::= ROLLBACK trans_opt */ + 188, /* (10) cmd ::= SAVEPOINT nm */ + 188, /* (11) cmd ::= RELEASE savepoint_opt nm */ + 188, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */ + 193, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */ + 195, /* (14) createkw ::= CREATE */ + 197, /* (15) ifnotexists ::= */ + 197, /* (16) ifnotexists ::= IF NOT EXISTS */ + 196, /* (17) temp ::= TEMP */ + 196, /* (18) temp ::= */ + 194, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_options */ + 194, /* (20) create_table_args ::= AS select */ + 201, /* (21) table_options ::= */ + 201, /* (22) table_options ::= WITHOUT nm */ + 203, /* (23) columnname ::= nm typetoken */ + 205, /* (24) typetoken ::= */ + 205, /* (25) typetoken ::= typename LP signed RP */ + 205, /* (26) typetoken ::= typename LP signed COMMA signed RP */ + 206, /* (27) typename ::= typename ID|STRING */ + 210, /* (28) scanpt ::= */ + 211, /* (29) scantok ::= */ + 212, /* (30) ccons ::= CONSTRAINT nm */ + 212, /* (31) ccons ::= DEFAULT scantok term */ + 212, /* (32) ccons ::= DEFAULT LP expr RP */ + 212, /* (33) ccons ::= DEFAULT PLUS scantok 
term */ + 212, /* (34) ccons ::= DEFAULT MINUS scantok term */ + 212, /* (35) ccons ::= DEFAULT scantok ID|INDEXED */ + 212, /* (36) ccons ::= NOT NULL onconf */ + 212, /* (37) ccons ::= PRIMARY KEY sortorder onconf autoinc */ + 212, /* (38) ccons ::= UNIQUE onconf */ + 212, /* (39) ccons ::= CHECK LP expr RP */ + 212, /* (40) ccons ::= REFERENCES nm eidlist_opt refargs */ + 212, /* (41) ccons ::= defer_subclause */ + 212, /* (42) ccons ::= COLLATE ID|STRING */ + 221, /* (43) generated ::= LP expr RP */ + 221, /* (44) generated ::= LP expr RP ID */ + 217, /* (45) autoinc ::= */ + 217, /* (46) autoinc ::= AUTOINCR */ + 219, /* (47) refargs ::= */ + 219, /* (48) refargs ::= refargs refarg */ + 222, /* (49) refarg ::= MATCH nm */ + 222, /* (50) refarg ::= ON INSERT refact */ + 222, /* (51) refarg ::= ON DELETE refact */ + 222, /* (52) refarg ::= ON UPDATE refact */ + 223, /* (53) refact ::= SET NULL */ + 223, /* (54) refact ::= SET DEFAULT */ + 223, /* (55) refact ::= CASCADE */ + 223, /* (56) refact ::= RESTRICT */ + 223, /* (57) refact ::= NO ACTION */ + 220, /* (58) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ + 220, /* (59) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ + 224, /* (60) init_deferred_pred_opt ::= */ + 224, /* (61) init_deferred_pred_opt ::= INITIALLY DEFERRED */ + 224, /* (62) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ + 200, /* (63) conslist_opt ::= */ + 226, /* (64) tconscomma ::= COMMA */ + 227, /* (65) tcons ::= CONSTRAINT nm */ + 227, /* (66) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ + 227, /* (67) tcons ::= UNIQUE LP sortlist RP onconf */ + 227, /* (68) tcons ::= CHECK LP expr RP onconf */ + 227, /* (69) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ + 230, /* (70) defer_subclause_opt ::= */ + 215, /* (71) onconf ::= */ + 215, /* (72) onconf ::= ON CONFLICT resolvetype */ + 231, /* (73) orconf ::= */ + 231, /* (74) orconf ::= OR resolvetype */ + 232, /* (75) resolvetype ::= IGNORE */ + 232, /* (76) resolvetype ::= REPLACE */ + 188, /* (77) cmd ::= DROP TABLE ifexists fullname */ + 234, /* (78) ifexists ::= IF EXISTS */ + 234, /* (79) ifexists ::= */ + 188, /* (80) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ + 188, /* (81) cmd ::= DROP VIEW ifexists fullname */ + 188, /* (82) cmd ::= select */ + 202, /* (83) select ::= WITH wqlist selectnowith */ + 202, /* (84) select ::= WITH RECURSIVE wqlist selectnowith */ + 202, /* (85) select ::= selectnowith */ + 236, /* (86) selectnowith ::= selectnowith multiselect_op oneselect */ + 239, /* (87) multiselect_op ::= UNION */ + 239, /* (88) multiselect_op ::= UNION ALL */ + 239, /* (89) multiselect_op ::= EXCEPT|INTERSECT */ + 237, /* (90) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ + 237, /* (91) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ + 249, /* (92) values ::= VALUES LP nexprlist RP */ + 249, /* (93) values ::= values COMMA LP nexprlist RP */ + 240, /* (94) distinct ::= DISTINCT */ + 240, /* (95) distinct ::= ALL */ + 240, /* (96) distinct ::= */ + 251, /* (97) sclp ::= */ + 241, /* (98) selcollist ::= sclp scanpt expr scanpt as */ + 241, /* (99) selcollist ::= sclp scanpt STAR */ + 241, /* (100) selcollist ::= sclp scanpt nm DOT STAR */ + 252, /* (101) as ::= AS nm */ + 252, /* (102) as ::= */ + 242, /* (103) from ::= */ + 242, /* (104) from ::= FROM seltablist */ + 254, /* 
(105) stl_prefix ::= seltablist joinop */ + 254, /* (106) stl_prefix ::= */ + 253, /* (107) seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */ + 253, /* (108) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */ + 253, /* (109) seltablist ::= stl_prefix LP select RP as on_opt using_opt */ + 253, /* (110) seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */ + 198, /* (111) dbnm ::= */ + 198, /* (112) dbnm ::= DOT nm */ + 235, /* (113) fullname ::= nm */ + 235, /* (114) fullname ::= nm DOT nm */ + 260, /* (115) xfullname ::= nm */ + 260, /* (116) xfullname ::= nm DOT nm */ + 260, /* (117) xfullname ::= nm DOT nm AS nm */ + 260, /* (118) xfullname ::= nm AS nm */ + 255, /* (119) joinop ::= COMMA|JOIN */ + 255, /* (120) joinop ::= JOIN_KW JOIN */ + 255, /* (121) joinop ::= JOIN_KW nm JOIN */ + 255, /* (122) joinop ::= JOIN_KW nm nm JOIN */ + 257, /* (123) on_opt ::= ON expr */ + 257, /* (124) on_opt ::= */ + 256, /* (125) indexed_opt ::= */ + 256, /* (126) indexed_opt ::= INDEXED BY nm */ + 256, /* (127) indexed_opt ::= NOT INDEXED */ + 258, /* (128) using_opt ::= USING LP idlist RP */ + 258, /* (129) using_opt ::= */ + 246, /* (130) orderby_opt ::= */ + 246, /* (131) orderby_opt ::= ORDER BY sortlist */ + 228, /* (132) sortlist ::= sortlist COMMA expr sortorder nulls */ + 228, /* (133) sortlist ::= expr sortorder nulls */ + 216, /* (134) sortorder ::= ASC */ + 216, /* (135) sortorder ::= DESC */ + 216, /* (136) sortorder ::= */ + 262, /* (137) nulls ::= NULLS FIRST */ + 262, /* (138) nulls ::= NULLS LAST */ + 262, /* (139) nulls ::= */ + 244, /* (140) groupby_opt ::= */ + 244, /* (141) groupby_opt ::= GROUP BY nexprlist */ + 245, /* (142) having_opt ::= */ + 245, /* (143) having_opt ::= HAVING expr */ + 247, /* (144) limit_opt ::= */ + 247, /* (145) limit_opt ::= LIMIT expr */ + 247, /* (146) limit_opt ::= LIMIT expr OFFSET expr */ + 247, /* (147) limit_opt ::= LIMIT expr COMMA expr */ + 188, /* (148) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */ + 243, /* (149) where_opt ::= */ + 243, /* (150) where_opt ::= WHERE expr */ + 264, /* (151) where_opt_ret ::= */ + 264, /* (152) where_opt_ret ::= WHERE expr */ + 264, /* (153) where_opt_ret ::= RETURNING selcollist */ + 264, /* (154) where_opt_ret ::= WHERE expr RETURNING selcollist */ + 188, /* (155) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */ + 265, /* (156) setlist ::= setlist COMMA nm EQ expr */ + 265, /* (157) setlist ::= setlist COMMA LP idlist RP EQ expr */ + 265, /* (158) setlist ::= nm EQ expr */ + 265, /* (159) setlist ::= LP idlist RP EQ expr */ + 188, /* (160) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ + 188, /* (161) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ + 268, /* (162) upsert ::= */ + 268, /* (163) upsert ::= RETURNING selcollist */ + 268, /* (164) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ + 268, /* (165) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ + 268, /* (166) upsert ::= ON CONFLICT DO NOTHING returning */ + 268, /* (167) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ + 269, /* (168) returning ::= RETURNING selcollist */ + 266, /* (169) insert_cmd ::= INSERT orconf */ + 266, /* (170) insert_cmd ::= REPLACE */ + 267, /* (171) idlist_opt ::= */ + 267, /* (172) idlist_opt ::= LP idlist RP */ + 261, /* (173) idlist ::= idlist COMMA nm */ + 261, /* (174) idlist ::= nm */ + 
214, /* (175) expr ::= LP expr RP */ + 214, /* (176) expr ::= ID|INDEXED */ + 214, /* (177) expr ::= JOIN_KW */ + 214, /* (178) expr ::= nm DOT nm */ + 214, /* (179) expr ::= nm DOT nm DOT nm */ + 213, /* (180) term ::= NULL|FLOAT|BLOB */ + 213, /* (181) term ::= STRING */ + 213, /* (182) term ::= INTEGER */ + 214, /* (183) expr ::= VARIABLE */ + 214, /* (184) expr ::= expr COLLATE ID|STRING */ + 214, /* (185) expr ::= CAST LP expr AS typetoken RP */ + 214, /* (186) expr ::= ID|INDEXED LP distinct exprlist RP */ + 214, /* (187) expr ::= ID|INDEXED LP STAR RP */ + 214, /* (188) expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ + 214, /* (189) expr ::= ID|INDEXED LP STAR RP filter_over */ + 213, /* (190) term ::= CTIME_KW */ + 214, /* (191) expr ::= LP nexprlist COMMA expr RP */ + 214, /* (192) expr ::= expr AND expr */ + 214, /* (193) expr ::= expr OR expr */ + 214, /* (194) expr ::= expr LT|GT|GE|LE expr */ + 214, /* (195) expr ::= expr EQ|NE expr */ + 214, /* (196) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ + 214, /* (197) expr ::= expr PLUS|MINUS expr */ + 214, /* (198) expr ::= expr STAR|SLASH|REM expr */ + 214, /* (199) expr ::= expr CONCAT expr */ + 271, /* (200) likeop ::= NOT LIKE_KW|MATCH */ + 214, /* (201) expr ::= expr likeop expr */ + 214, /* (202) expr ::= expr likeop expr ESCAPE expr */ + 214, /* (203) expr ::= expr ISNULL|NOTNULL */ + 214, /* (204) expr ::= expr NOT NULL */ + 214, /* (205) expr ::= expr IS expr */ + 214, /* (206) expr ::= expr IS NOT expr */ + 214, /* (207) expr ::= NOT expr */ + 214, /* (208) expr ::= BITNOT expr */ + 214, /* (209) expr ::= PLUS|MINUS expr */ + 272, /* (210) between_op ::= BETWEEN */ + 272, /* (211) between_op ::= NOT BETWEEN */ + 214, /* (212) expr ::= expr between_op expr AND expr */ + 273, /* (213) in_op ::= IN */ + 273, /* (214) in_op ::= NOT IN */ + 214, /* (215) expr ::= expr in_op LP exprlist RP */ + 214, /* (216) expr ::= LP select RP */ + 214, /* (217) expr ::= expr in_op LP select RP */ + 214, /* (218) expr ::= expr in_op nm dbnm paren_exprlist */ + 214, /* (219) expr ::= EXISTS LP select RP */ + 214, /* (220) expr ::= CASE case_operand case_exprlist case_else END */ + 276, /* (221) case_exprlist ::= case_exprlist WHEN expr THEN expr */ + 276, /* (222) case_exprlist ::= WHEN expr THEN expr */ + 277, /* (223) case_else ::= ELSE expr */ + 277, /* (224) case_else ::= */ + 275, /* (225) case_operand ::= expr */ + 275, /* (226) case_operand ::= */ + 259, /* (227) exprlist ::= */ + 250, /* (228) nexprlist ::= nexprlist COMMA expr */ + 250, /* (229) nexprlist ::= expr */ + 274, /* (230) paren_exprlist ::= */ + 274, /* (231) paren_exprlist ::= LP exprlist RP */ + 188, /* (232) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + 278, /* (233) uniqueflag ::= UNIQUE */ + 278, /* (234) uniqueflag ::= */ + 218, /* (235) eidlist_opt ::= */ + 218, /* (236) eidlist_opt ::= LP eidlist RP */ + 229, /* (237) eidlist ::= eidlist COMMA nm collate sortorder */ + 229, /* (238) eidlist ::= nm collate sortorder */ + 279, /* (239) collate ::= */ + 279, /* (240) collate ::= COLLATE ID|STRING */ + 188, /* (241) cmd ::= DROP INDEX ifexists fullname */ + 188, /* (242) cmd ::= VACUUM vinto */ + 188, /* (243) cmd ::= VACUUM nm vinto */ + 280, /* (244) vinto ::= INTO expr */ + 280, /* (245) vinto ::= */ + 188, /* (246) cmd ::= PRAGMA nm dbnm */ + 188, /* (247) cmd ::= PRAGMA nm dbnm EQ nmnum */ + 188, /* (248) cmd ::= PRAGMA nm dbnm LP nmnum RP */ + 188, /* (249) cmd ::= PRAGMA nm dbnm EQ minus_num */ + 188, /* 
(250) cmd ::= PRAGMA nm dbnm LP minus_num RP */ + 208, /* (251) plus_num ::= PLUS INTEGER|FLOAT */ + 209, /* (252) minus_num ::= MINUS INTEGER|FLOAT */ + 188, /* (253) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + 282, /* (254) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + 284, /* (255) trigger_time ::= BEFORE|AFTER */ + 284, /* (256) trigger_time ::= INSTEAD OF */ + 284, /* (257) trigger_time ::= */ + 285, /* (258) trigger_event ::= DELETE|INSERT */ + 285, /* (259) trigger_event ::= UPDATE */ + 285, /* (260) trigger_event ::= UPDATE OF idlist */ + 287, /* (261) when_clause ::= */ + 287, /* (262) when_clause ::= WHEN expr */ + 283, /* (263) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ + 283, /* (264) trigger_cmd_list ::= trigger_cmd SEMI */ + 289, /* (265) trnm ::= nm DOT nm */ + 290, /* (266) tridxby ::= INDEXED BY nm */ + 290, /* (267) tridxby ::= NOT INDEXED */ + 288, /* (268) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ + 288, /* (269) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ + 288, /* (270) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ + 288, /* (271) trigger_cmd ::= scanpt select scanpt */ + 214, /* (272) expr ::= RAISE LP IGNORE RP */ + 214, /* (273) expr ::= RAISE LP raisetype COMMA nm RP */ + 233, /* (274) raisetype ::= ROLLBACK */ + 233, /* (275) raisetype ::= ABORT */ + 233, /* (276) raisetype ::= FAIL */ + 188, /* (277) cmd ::= DROP TRIGGER ifexists fullname */ + 188, /* (278) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + 188, /* (279) cmd ::= DETACH database_kw_opt expr */ + 292, /* (280) key_opt ::= */ + 292, /* (281) key_opt ::= KEY expr */ + 188, /* (282) cmd ::= REINDEX */ + 188, /* (283) cmd ::= REINDEX nm dbnm */ + 188, /* (284) cmd ::= ANALYZE */ + 188, /* (285) cmd ::= ANALYZE nm dbnm */ + 188, /* (286) cmd ::= ALTER TABLE fullname RENAME TO nm */ + 188, /* (287) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + 188, /* (288) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + 293, /* (289) add_column_fullname ::= fullname */ + 188, /* (290) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + 188, /* (291) cmd ::= create_vtab */ + 188, /* (292) cmd ::= create_vtab LP vtabarglist RP */ + 295, /* (293) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + 297, /* (294) vtabarg ::= */ + 298, /* (295) vtabargtoken ::= ANY */ + 298, /* (296) vtabargtoken ::= lp anylist RP */ + 299, /* (297) lp ::= LP */ + 263, /* (298) with ::= WITH wqlist */ + 263, /* (299) with ::= WITH RECURSIVE wqlist */ + 302, /* (300) wqas ::= AS */ + 302, /* (301) wqas ::= AS MATERIALIZED */ + 302, /* (302) wqas ::= AS NOT MATERIALIZED */ + 301, /* (303) wqitem ::= nm eidlist_opt wqas LP select RP */ + 238, /* (304) wqlist ::= wqitem */ + 238, /* (305) wqlist ::= wqlist COMMA wqitem */ + 303, /* (306) windowdefn_list ::= windowdefn */ + 303, /* (307) windowdefn_list ::= windowdefn_list COMMA windowdefn */ + 304, /* (308) windowdefn ::= nm AS LP window RP */ + 305, /* (309) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + 305, /* (310) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + 305, /* (311) window ::= ORDER BY sortlist frame_opt */ + 305, /* (312) window ::= nm ORDER BY sortlist frame_opt */ + 305, /* (313) window ::= frame_opt */ + 305, /* (314) window ::= nm frame_opt */ + 306, /* (315) frame_opt ::= */ 
+ 306, /* (316) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + 306, /* (317) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + 310, /* (318) range_or_rows ::= RANGE|ROWS|GROUPS */ + 312, /* (319) frame_bound_s ::= frame_bound */ + 312, /* (320) frame_bound_s ::= UNBOUNDED PRECEDING */ + 313, /* (321) frame_bound_e ::= frame_bound */ + 313, /* (322) frame_bound_e ::= UNBOUNDED FOLLOWING */ + 311, /* (323) frame_bound ::= expr PRECEDING|FOLLOWING */ + 311, /* (324) frame_bound ::= CURRENT ROW */ + 314, /* (325) frame_exclude_opt ::= */ + 314, /* (326) frame_exclude_opt ::= EXCLUDE frame_exclude */ + 315, /* (327) frame_exclude ::= NO OTHERS */ + 315, /* (328) frame_exclude ::= CURRENT ROW */ + 315, /* (329) frame_exclude ::= GROUP|TIES */ + 248, /* (330) window_clause ::= WINDOW windowdefn_list */ + 270, /* (331) filter_over ::= filter_clause over_clause */ + 270, /* (332) filter_over ::= over_clause */ + 270, /* (333) filter_over ::= filter_clause */ + 309, /* (334) over_clause ::= OVER LP window RP */ + 309, /* (335) over_clause ::= OVER nm */ + 308, /* (336) filter_clause ::= FILTER LP WHERE expr RP */ + 183, /* (337) input ::= cmdlist */ + 184, /* (338) cmdlist ::= cmdlist ecmd */ + 184, /* (339) cmdlist ::= ecmd */ + 185, /* (340) ecmd ::= SEMI */ + 185, /* (341) ecmd ::= cmdx SEMI */ + 185, /* (342) ecmd ::= explain cmdx SEMI */ + 190, /* (343) trans_opt ::= */ + 190, /* (344) trans_opt ::= TRANSACTION */ + 190, /* (345) trans_opt ::= TRANSACTION nm */ + 192, /* (346) savepoint_opt ::= SAVEPOINT */ + 192, /* (347) savepoint_opt ::= */ + 188, /* (348) cmd ::= create_table create_table_args */ + 199, /* (349) columnlist ::= columnlist COMMA columnname carglist */ + 199, /* (350) columnlist ::= columnname carglist */ + 191, /* (351) nm ::= ID|INDEXED */ + 191, /* (352) nm ::= STRING */ + 191, /* (353) nm ::= JOIN_KW */ + 205, /* (354) typetoken ::= typename */ + 206, /* (355) typename ::= ID|STRING */ + 207, /* (356) signed ::= plus_num */ + 207, /* (357) signed ::= minus_num */ + 204, /* (358) carglist ::= carglist ccons */ + 204, /* (359) carglist ::= */ + 212, /* (360) ccons ::= NULL onconf */ + 212, /* (361) ccons ::= GENERATED ALWAYS AS generated */ + 212, /* (362) ccons ::= AS generated */ + 200, /* (363) conslist_opt ::= COMMA conslist */ + 225, /* (364) conslist ::= conslist tconscomma tcons */ + 225, /* (365) conslist ::= tcons */ + 226, /* (366) tconscomma ::= */ + 230, /* (367) defer_subclause_opt ::= defer_subclause */ + 232, /* (368) resolvetype ::= raisetype */ + 236, /* (369) selectnowith ::= oneselect */ + 237, /* (370) oneselect ::= values */ + 251, /* (371) sclp ::= selcollist COMMA */ + 252, /* (372) as ::= ID|STRING */ + 269, /* (373) returning ::= */ + 214, /* (374) expr ::= term */ + 271, /* (375) likeop ::= LIKE_KW|MATCH */ + 259, /* (376) exprlist ::= nexprlist */ + 281, /* (377) nmnum ::= plus_num */ + 281, /* (378) nmnum ::= nm */ + 281, /* (379) nmnum ::= ON */ + 281, /* (380) nmnum ::= DELETE */ + 281, /* (381) nmnum ::= DEFAULT */ + 208, /* (382) plus_num ::= INTEGER|FLOAT */ + 286, /* (383) foreach_clause ::= */ + 286, /* (384) foreach_clause ::= FOR EACH ROW */ + 289, /* (385) trnm ::= nm */ + 290, /* (386) tridxby ::= */ + 291, /* (387) database_kw_opt ::= DATABASE */ + 291, /* (388) database_kw_opt ::= */ + 294, /* (389) kwcolumn_opt ::= */ + 294, /* (390) kwcolumn_opt ::= COLUMNKW */ + 296, /* (391) vtabarglist ::= vtabarg */ + 296, /* (392) vtabarglist ::= vtabarglist COMMA vtabarg */ + 297, /* 
(393) vtabarg ::= vtabarg vtabargtoken */ + 300, /* (394) anylist ::= */ + 300, /* (395) anylist ::= anylist LP anylist RP */ + 300, /* (396) anylist ::= anylist ANY */ + 263, /* (397) with ::= */ }; /* For rule J, yyRuleInfoNRhs[J] contains the negative of the number @@ -158015,243 +160229,256 @@ static const signed char yyRuleInfoNRhs[] = { -2, /* (145) limit_opt ::= LIMIT expr */ -4, /* (146) limit_opt ::= LIMIT expr OFFSET expr */ -4, /* (147) limit_opt ::= LIMIT expr COMMA expr */ - -6, /* (148) cmd ::= with DELETE FROM xfullname indexed_opt where_opt */ + -6, /* (148) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */ 0, /* (149) where_opt ::= */ -2, /* (150) where_opt ::= WHERE expr */ - -9, /* (151) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt */ - -5, /* (152) setlist ::= setlist COMMA nm EQ expr */ - -7, /* (153) setlist ::= setlist COMMA LP idlist RP EQ expr */ - -3, /* (154) setlist ::= nm EQ expr */ - -5, /* (155) setlist ::= LP idlist RP EQ expr */ - -7, /* (156) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ - -7, /* (157) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES */ - 0, /* (158) upsert ::= */ - -11, /* (159) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt */ - -8, /* (160) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING */ - -4, /* (161) upsert ::= ON CONFLICT DO NOTHING */ - -2, /* (162) insert_cmd ::= INSERT orconf */ - -1, /* (163) insert_cmd ::= REPLACE */ - 0, /* (164) idlist_opt ::= */ - -3, /* (165) idlist_opt ::= LP idlist RP */ - -3, /* (166) idlist ::= idlist COMMA nm */ - -1, /* (167) idlist ::= nm */ - -3, /* (168) expr ::= LP expr RP */ - -1, /* (169) expr ::= ID|INDEXED */ - -1, /* (170) expr ::= JOIN_KW */ - -3, /* (171) expr ::= nm DOT nm */ - -5, /* (172) expr ::= nm DOT nm DOT nm */ - -1, /* (173) term ::= NULL|FLOAT|BLOB */ - -1, /* (174) term ::= STRING */ - -1, /* (175) term ::= INTEGER */ - -1, /* (176) expr ::= VARIABLE */ - -3, /* (177) expr ::= expr COLLATE ID|STRING */ - -6, /* (178) expr ::= CAST LP expr AS typetoken RP */ - -5, /* (179) expr ::= ID|INDEXED LP distinct exprlist RP */ - -4, /* (180) expr ::= ID|INDEXED LP STAR RP */ - -6, /* (181) expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ - -5, /* (182) expr ::= ID|INDEXED LP STAR RP filter_over */ - -1, /* (183) term ::= CTIME_KW */ - -5, /* (184) expr ::= LP nexprlist COMMA expr RP */ - -3, /* (185) expr ::= expr AND expr */ - -3, /* (186) expr ::= expr OR expr */ - -3, /* (187) expr ::= expr LT|GT|GE|LE expr */ - -3, /* (188) expr ::= expr EQ|NE expr */ - -3, /* (189) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ - -3, /* (190) expr ::= expr PLUS|MINUS expr */ - -3, /* (191) expr ::= expr STAR|SLASH|REM expr */ - -3, /* (192) expr ::= expr CONCAT expr */ - -2, /* (193) likeop ::= NOT LIKE_KW|MATCH */ - -3, /* (194) expr ::= expr likeop expr */ - -5, /* (195) expr ::= expr likeop expr ESCAPE expr */ - -2, /* (196) expr ::= expr ISNULL|NOTNULL */ - -3, /* (197) expr ::= expr NOT NULL */ - -3, /* (198) expr ::= expr IS expr */ - -4, /* (199) expr ::= expr IS NOT expr */ - -2, /* (200) expr ::= NOT expr */ - -2, /* (201) expr ::= BITNOT expr */ - -2, /* (202) expr ::= PLUS|MINUS expr */ - -1, /* (203) between_op ::= BETWEEN */ - -2, /* (204) between_op ::= NOT BETWEEN */ - -5, /* (205) expr ::= expr between_op expr AND expr */ - -1, /* (206) in_op ::= IN */ - -2, /* (207) in_op ::= NOT IN */ - -5, /* (208) expr ::= expr in_op LP exprlist RP */ - -3, 
/* (209) expr ::= LP select RP */ - -5, /* (210) expr ::= expr in_op LP select RP */ - -5, /* (211) expr ::= expr in_op nm dbnm paren_exprlist */ - -4, /* (212) expr ::= EXISTS LP select RP */ - -5, /* (213) expr ::= CASE case_operand case_exprlist case_else END */ - -5, /* (214) case_exprlist ::= case_exprlist WHEN expr THEN expr */ - -4, /* (215) case_exprlist ::= WHEN expr THEN expr */ - -2, /* (216) case_else ::= ELSE expr */ - 0, /* (217) case_else ::= */ - -1, /* (218) case_operand ::= expr */ - 0, /* (219) case_operand ::= */ - 0, /* (220) exprlist ::= */ - -3, /* (221) nexprlist ::= nexprlist COMMA expr */ - -1, /* (222) nexprlist ::= expr */ - 0, /* (223) paren_exprlist ::= */ - -3, /* (224) paren_exprlist ::= LP exprlist RP */ - -12, /* (225) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ - -1, /* (226) uniqueflag ::= UNIQUE */ - 0, /* (227) uniqueflag ::= */ - 0, /* (228) eidlist_opt ::= */ - -3, /* (229) eidlist_opt ::= LP eidlist RP */ - -5, /* (230) eidlist ::= eidlist COMMA nm collate sortorder */ - -3, /* (231) eidlist ::= nm collate sortorder */ - 0, /* (232) collate ::= */ - -2, /* (233) collate ::= COLLATE ID|STRING */ - -4, /* (234) cmd ::= DROP INDEX ifexists fullname */ - -2, /* (235) cmd ::= VACUUM vinto */ - -3, /* (236) cmd ::= VACUUM nm vinto */ - -2, /* (237) vinto ::= INTO expr */ - 0, /* (238) vinto ::= */ - -3, /* (239) cmd ::= PRAGMA nm dbnm */ - -5, /* (240) cmd ::= PRAGMA nm dbnm EQ nmnum */ - -6, /* (241) cmd ::= PRAGMA nm dbnm LP nmnum RP */ - -5, /* (242) cmd ::= PRAGMA nm dbnm EQ minus_num */ - -6, /* (243) cmd ::= PRAGMA nm dbnm LP minus_num RP */ - -2, /* (244) plus_num ::= PLUS INTEGER|FLOAT */ - -2, /* (245) minus_num ::= MINUS INTEGER|FLOAT */ - -5, /* (246) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ - -11, /* (247) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ - -1, /* (248) trigger_time ::= BEFORE|AFTER */ - -2, /* (249) trigger_time ::= INSTEAD OF */ - 0, /* (250) trigger_time ::= */ - -1, /* (251) trigger_event ::= DELETE|INSERT */ - -1, /* (252) trigger_event ::= UPDATE */ - -3, /* (253) trigger_event ::= UPDATE OF idlist */ - 0, /* (254) when_clause ::= */ - -2, /* (255) when_clause ::= WHEN expr */ - -3, /* (256) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ - -2, /* (257) trigger_cmd_list ::= trigger_cmd SEMI */ - -3, /* (258) trnm ::= nm DOT nm */ - -3, /* (259) tridxby ::= INDEXED BY nm */ - -2, /* (260) tridxby ::= NOT INDEXED */ - -9, /* (261) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ - -8, /* (262) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ - -6, /* (263) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ - -3, /* (264) trigger_cmd ::= scanpt select scanpt */ - -4, /* (265) expr ::= RAISE LP IGNORE RP */ - -6, /* (266) expr ::= RAISE LP raisetype COMMA nm RP */ - -1, /* (267) raisetype ::= ROLLBACK */ - -1, /* (268) raisetype ::= ABORT */ - -1, /* (269) raisetype ::= FAIL */ - -4, /* (270) cmd ::= DROP TRIGGER ifexists fullname */ - -6, /* (271) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ - -3, /* (272) cmd ::= DETACH database_kw_opt expr */ - 0, /* (273) key_opt ::= */ - -2, /* (274) key_opt ::= KEY expr */ - -1, /* (275) cmd ::= REINDEX */ - -3, /* (276) cmd ::= REINDEX nm dbnm */ - -1, /* (277) cmd ::= ANALYZE */ - -3, /* (278) cmd ::= ANALYZE nm dbnm */ - -6, /* (279) cmd ::= ALTER TABLE fullname 
RENAME TO nm */ - -7, /* (280) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ - -1, /* (281) add_column_fullname ::= fullname */ - -8, /* (282) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ - -1, /* (283) cmd ::= create_vtab */ - -4, /* (284) cmd ::= create_vtab LP vtabarglist RP */ - -8, /* (285) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ - 0, /* (286) vtabarg ::= */ - -1, /* (287) vtabargtoken ::= ANY */ - -3, /* (288) vtabargtoken ::= lp anylist RP */ - -1, /* (289) lp ::= LP */ - -2, /* (290) with ::= WITH wqlist */ - -3, /* (291) with ::= WITH RECURSIVE wqlist */ - -6, /* (292) wqlist ::= nm eidlist_opt AS LP select RP */ - -8, /* (293) wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP */ - -1, /* (294) windowdefn_list ::= windowdefn */ - -3, /* (295) windowdefn_list ::= windowdefn_list COMMA windowdefn */ - -5, /* (296) windowdefn ::= nm AS LP window RP */ - -5, /* (297) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ - -6, /* (298) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ - -4, /* (299) window ::= ORDER BY sortlist frame_opt */ - -5, /* (300) window ::= nm ORDER BY sortlist frame_opt */ - -1, /* (301) window ::= frame_opt */ - -2, /* (302) window ::= nm frame_opt */ - 0, /* (303) frame_opt ::= */ - -3, /* (304) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ - -6, /* (305) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ - -1, /* (306) range_or_rows ::= RANGE|ROWS|GROUPS */ - -1, /* (307) frame_bound_s ::= frame_bound */ - -2, /* (308) frame_bound_s ::= UNBOUNDED PRECEDING */ - -1, /* (309) frame_bound_e ::= frame_bound */ - -2, /* (310) frame_bound_e ::= UNBOUNDED FOLLOWING */ - -2, /* (311) frame_bound ::= expr PRECEDING|FOLLOWING */ - -2, /* (312) frame_bound ::= CURRENT ROW */ - 0, /* (313) frame_exclude_opt ::= */ - -2, /* (314) frame_exclude_opt ::= EXCLUDE frame_exclude */ - -2, /* (315) frame_exclude ::= NO OTHERS */ - -2, /* (316) frame_exclude ::= CURRENT ROW */ - -1, /* (317) frame_exclude ::= GROUP|TIES */ - -2, /* (318) window_clause ::= WINDOW windowdefn_list */ - -2, /* (319) filter_over ::= filter_clause over_clause */ - -1, /* (320) filter_over ::= over_clause */ - -1, /* (321) filter_over ::= filter_clause */ - -4, /* (322) over_clause ::= OVER LP window RP */ - -2, /* (323) over_clause ::= OVER nm */ - -5, /* (324) filter_clause ::= FILTER LP WHERE expr RP */ - -1, /* (325) input ::= cmdlist */ - -2, /* (326) cmdlist ::= cmdlist ecmd */ - -1, /* (327) cmdlist ::= ecmd */ - -1, /* (328) ecmd ::= SEMI */ - -2, /* (329) ecmd ::= cmdx SEMI */ - -3, /* (330) ecmd ::= explain cmdx SEMI */ - 0, /* (331) trans_opt ::= */ - -1, /* (332) trans_opt ::= TRANSACTION */ - -2, /* (333) trans_opt ::= TRANSACTION nm */ - -1, /* (334) savepoint_opt ::= SAVEPOINT */ - 0, /* (335) savepoint_opt ::= */ - -2, /* (336) cmd ::= create_table create_table_args */ - -4, /* (337) columnlist ::= columnlist COMMA columnname carglist */ - -2, /* (338) columnlist ::= columnname carglist */ - -1, /* (339) nm ::= ID|INDEXED */ - -1, /* (340) nm ::= STRING */ - -1, /* (341) nm ::= JOIN_KW */ - -1, /* (342) typetoken ::= typename */ - -1, /* (343) typename ::= ID|STRING */ - -1, /* (344) signed ::= plus_num */ - -1, /* (345) signed ::= minus_num */ - -2, /* (346) carglist ::= carglist ccons */ - 0, /* (347) carglist ::= */ - -2, /* (348) ccons ::= NULL onconf */ - -4, /* (349) ccons ::= GENERATED ALWAYS AS generated */ - -2, /* (350) 
ccons ::= AS generated */ - -2, /* (351) conslist_opt ::= COMMA conslist */ - -3, /* (352) conslist ::= conslist tconscomma tcons */ - -1, /* (353) conslist ::= tcons */ - 0, /* (354) tconscomma ::= */ - -1, /* (355) defer_subclause_opt ::= defer_subclause */ - -1, /* (356) resolvetype ::= raisetype */ - -1, /* (357) selectnowith ::= oneselect */ - -1, /* (358) oneselect ::= values */ - -2, /* (359) sclp ::= selcollist COMMA */ - -1, /* (360) as ::= ID|STRING */ - -1, /* (361) expr ::= term */ - -1, /* (362) likeop ::= LIKE_KW|MATCH */ - -1, /* (363) exprlist ::= nexprlist */ - -1, /* (364) nmnum ::= plus_num */ - -1, /* (365) nmnum ::= nm */ - -1, /* (366) nmnum ::= ON */ - -1, /* (367) nmnum ::= DELETE */ - -1, /* (368) nmnum ::= DEFAULT */ - -1, /* (369) plus_num ::= INTEGER|FLOAT */ - 0, /* (370) foreach_clause ::= */ - -3, /* (371) foreach_clause ::= FOR EACH ROW */ - -1, /* (372) trnm ::= nm */ - 0, /* (373) tridxby ::= */ - -1, /* (374) database_kw_opt ::= DATABASE */ - 0, /* (375) database_kw_opt ::= */ - 0, /* (376) kwcolumn_opt ::= */ - -1, /* (377) kwcolumn_opt ::= COLUMNKW */ - -1, /* (378) vtabarglist ::= vtabarg */ - -3, /* (379) vtabarglist ::= vtabarglist COMMA vtabarg */ - -2, /* (380) vtabarg ::= vtabarg vtabargtoken */ - 0, /* (381) anylist ::= */ - -4, /* (382) anylist ::= anylist LP anylist RP */ - -2, /* (383) anylist ::= anylist ANY */ - 0, /* (384) with ::= */ + 0, /* (151) where_opt_ret ::= */ + -2, /* (152) where_opt_ret ::= WHERE expr */ + -2, /* (153) where_opt_ret ::= RETURNING selcollist */ + -4, /* (154) where_opt_ret ::= WHERE expr RETURNING selcollist */ + -9, /* (155) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret */ + -5, /* (156) setlist ::= setlist COMMA nm EQ expr */ + -7, /* (157) setlist ::= setlist COMMA LP idlist RP EQ expr */ + -3, /* (158) setlist ::= nm EQ expr */ + -5, /* (159) setlist ::= LP idlist RP EQ expr */ + -7, /* (160) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ + -8, /* (161) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ + 0, /* (162) upsert ::= */ + -2, /* (163) upsert ::= RETURNING selcollist */ + -12, /* (164) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ + -9, /* (165) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ + -5, /* (166) upsert ::= ON CONFLICT DO NOTHING returning */ + -8, /* (167) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ + -2, /* (168) returning ::= RETURNING selcollist */ + -2, /* (169) insert_cmd ::= INSERT orconf */ + -1, /* (170) insert_cmd ::= REPLACE */ + 0, /* (171) idlist_opt ::= */ + -3, /* (172) idlist_opt ::= LP idlist RP */ + -3, /* (173) idlist ::= idlist COMMA nm */ + -1, /* (174) idlist ::= nm */ + -3, /* (175) expr ::= LP expr RP */ + -1, /* (176) expr ::= ID|INDEXED */ + -1, /* (177) expr ::= JOIN_KW */ + -3, /* (178) expr ::= nm DOT nm */ + -5, /* (179) expr ::= nm DOT nm DOT nm */ + -1, /* (180) term ::= NULL|FLOAT|BLOB */ + -1, /* (181) term ::= STRING */ + -1, /* (182) term ::= INTEGER */ + -1, /* (183) expr ::= VARIABLE */ + -3, /* (184) expr ::= expr COLLATE ID|STRING */ + -6, /* (185) expr ::= CAST LP expr AS typetoken RP */ + -5, /* (186) expr ::= ID|INDEXED LP distinct exprlist RP */ + -4, /* (187) expr ::= ID|INDEXED LP STAR RP */ + -6, /* (188) expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ + -5, /* (189) expr ::= ID|INDEXED LP STAR RP filter_over */ + -1, /* (190) term ::= CTIME_KW */ + -5, /* (191) 
expr ::= LP nexprlist COMMA expr RP */ + -3, /* (192) expr ::= expr AND expr */ + -3, /* (193) expr ::= expr OR expr */ + -3, /* (194) expr ::= expr LT|GT|GE|LE expr */ + -3, /* (195) expr ::= expr EQ|NE expr */ + -3, /* (196) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ + -3, /* (197) expr ::= expr PLUS|MINUS expr */ + -3, /* (198) expr ::= expr STAR|SLASH|REM expr */ + -3, /* (199) expr ::= expr CONCAT expr */ + -2, /* (200) likeop ::= NOT LIKE_KW|MATCH */ + -3, /* (201) expr ::= expr likeop expr */ + -5, /* (202) expr ::= expr likeop expr ESCAPE expr */ + -2, /* (203) expr ::= expr ISNULL|NOTNULL */ + -3, /* (204) expr ::= expr NOT NULL */ + -3, /* (205) expr ::= expr IS expr */ + -4, /* (206) expr ::= expr IS NOT expr */ + -2, /* (207) expr ::= NOT expr */ + -2, /* (208) expr ::= BITNOT expr */ + -2, /* (209) expr ::= PLUS|MINUS expr */ + -1, /* (210) between_op ::= BETWEEN */ + -2, /* (211) between_op ::= NOT BETWEEN */ + -5, /* (212) expr ::= expr between_op expr AND expr */ + -1, /* (213) in_op ::= IN */ + -2, /* (214) in_op ::= NOT IN */ + -5, /* (215) expr ::= expr in_op LP exprlist RP */ + -3, /* (216) expr ::= LP select RP */ + -5, /* (217) expr ::= expr in_op LP select RP */ + -5, /* (218) expr ::= expr in_op nm dbnm paren_exprlist */ + -4, /* (219) expr ::= EXISTS LP select RP */ + -5, /* (220) expr ::= CASE case_operand case_exprlist case_else END */ + -5, /* (221) case_exprlist ::= case_exprlist WHEN expr THEN expr */ + -4, /* (222) case_exprlist ::= WHEN expr THEN expr */ + -2, /* (223) case_else ::= ELSE expr */ + 0, /* (224) case_else ::= */ + -1, /* (225) case_operand ::= expr */ + 0, /* (226) case_operand ::= */ + 0, /* (227) exprlist ::= */ + -3, /* (228) nexprlist ::= nexprlist COMMA expr */ + -1, /* (229) nexprlist ::= expr */ + 0, /* (230) paren_exprlist ::= */ + -3, /* (231) paren_exprlist ::= LP exprlist RP */ + -12, /* (232) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + -1, /* (233) uniqueflag ::= UNIQUE */ + 0, /* (234) uniqueflag ::= */ + 0, /* (235) eidlist_opt ::= */ + -3, /* (236) eidlist_opt ::= LP eidlist RP */ + -5, /* (237) eidlist ::= eidlist COMMA nm collate sortorder */ + -3, /* (238) eidlist ::= nm collate sortorder */ + 0, /* (239) collate ::= */ + -2, /* (240) collate ::= COLLATE ID|STRING */ + -4, /* (241) cmd ::= DROP INDEX ifexists fullname */ + -2, /* (242) cmd ::= VACUUM vinto */ + -3, /* (243) cmd ::= VACUUM nm vinto */ + -2, /* (244) vinto ::= INTO expr */ + 0, /* (245) vinto ::= */ + -3, /* (246) cmd ::= PRAGMA nm dbnm */ + -5, /* (247) cmd ::= PRAGMA nm dbnm EQ nmnum */ + -6, /* (248) cmd ::= PRAGMA nm dbnm LP nmnum RP */ + -5, /* (249) cmd ::= PRAGMA nm dbnm EQ minus_num */ + -6, /* (250) cmd ::= PRAGMA nm dbnm LP minus_num RP */ + -2, /* (251) plus_num ::= PLUS INTEGER|FLOAT */ + -2, /* (252) minus_num ::= MINUS INTEGER|FLOAT */ + -5, /* (253) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + -11, /* (254) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + -1, /* (255) trigger_time ::= BEFORE|AFTER */ + -2, /* (256) trigger_time ::= INSTEAD OF */ + 0, /* (257) trigger_time ::= */ + -1, /* (258) trigger_event ::= DELETE|INSERT */ + -1, /* (259) trigger_event ::= UPDATE */ + -3, /* (260) trigger_event ::= UPDATE OF idlist */ + 0, /* (261) when_clause ::= */ + -2, /* (262) when_clause ::= WHEN expr */ + -3, /* (263) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ + -2, /* (264) trigger_cmd_list ::= 
trigger_cmd SEMI */ + -3, /* (265) trnm ::= nm DOT nm */ + -3, /* (266) tridxby ::= INDEXED BY nm */ + -2, /* (267) tridxby ::= NOT INDEXED */ + -9, /* (268) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ + -8, /* (269) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ + -6, /* (270) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ + -3, /* (271) trigger_cmd ::= scanpt select scanpt */ + -4, /* (272) expr ::= RAISE LP IGNORE RP */ + -6, /* (273) expr ::= RAISE LP raisetype COMMA nm RP */ + -1, /* (274) raisetype ::= ROLLBACK */ + -1, /* (275) raisetype ::= ABORT */ + -1, /* (276) raisetype ::= FAIL */ + -4, /* (277) cmd ::= DROP TRIGGER ifexists fullname */ + -6, /* (278) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + -3, /* (279) cmd ::= DETACH database_kw_opt expr */ + 0, /* (280) key_opt ::= */ + -2, /* (281) key_opt ::= KEY expr */ + -1, /* (282) cmd ::= REINDEX */ + -3, /* (283) cmd ::= REINDEX nm dbnm */ + -1, /* (284) cmd ::= ANALYZE */ + -3, /* (285) cmd ::= ANALYZE nm dbnm */ + -6, /* (286) cmd ::= ALTER TABLE fullname RENAME TO nm */ + -7, /* (287) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + -6, /* (288) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + -1, /* (289) add_column_fullname ::= fullname */ + -8, /* (290) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + -1, /* (291) cmd ::= create_vtab */ + -4, /* (292) cmd ::= create_vtab LP vtabarglist RP */ + -8, /* (293) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + 0, /* (294) vtabarg ::= */ + -1, /* (295) vtabargtoken ::= ANY */ + -3, /* (296) vtabargtoken ::= lp anylist RP */ + -1, /* (297) lp ::= LP */ + -2, /* (298) with ::= WITH wqlist */ + -3, /* (299) with ::= WITH RECURSIVE wqlist */ + -1, /* (300) wqas ::= AS */ + -2, /* (301) wqas ::= AS MATERIALIZED */ + -3, /* (302) wqas ::= AS NOT MATERIALIZED */ + -6, /* (303) wqitem ::= nm eidlist_opt wqas LP select RP */ + -1, /* (304) wqlist ::= wqitem */ + -3, /* (305) wqlist ::= wqlist COMMA wqitem */ + -1, /* (306) windowdefn_list ::= windowdefn */ + -3, /* (307) windowdefn_list ::= windowdefn_list COMMA windowdefn */ + -5, /* (308) windowdefn ::= nm AS LP window RP */ + -5, /* (309) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + -6, /* (310) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + -4, /* (311) window ::= ORDER BY sortlist frame_opt */ + -5, /* (312) window ::= nm ORDER BY sortlist frame_opt */ + -1, /* (313) window ::= frame_opt */ + -2, /* (314) window ::= nm frame_opt */ + 0, /* (315) frame_opt ::= */ + -3, /* (316) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + -6, /* (317) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + -1, /* (318) range_or_rows ::= RANGE|ROWS|GROUPS */ + -1, /* (319) frame_bound_s ::= frame_bound */ + -2, /* (320) frame_bound_s ::= UNBOUNDED PRECEDING */ + -1, /* (321) frame_bound_e ::= frame_bound */ + -2, /* (322) frame_bound_e ::= UNBOUNDED FOLLOWING */ + -2, /* (323) frame_bound ::= expr PRECEDING|FOLLOWING */ + -2, /* (324) frame_bound ::= CURRENT ROW */ + 0, /* (325) frame_exclude_opt ::= */ + -2, /* (326) frame_exclude_opt ::= EXCLUDE frame_exclude */ + -2, /* (327) frame_exclude ::= NO OTHERS */ + -2, /* (328) frame_exclude ::= CURRENT ROW */ + -1, /* (329) frame_exclude ::= GROUP|TIES */ + -2, /* (330) window_clause ::= WINDOW windowdefn_list */ + -2, /* (331) filter_over ::= 
filter_clause over_clause */ + -1, /* (332) filter_over ::= over_clause */ + -1, /* (333) filter_over ::= filter_clause */ + -4, /* (334) over_clause ::= OVER LP window RP */ + -2, /* (335) over_clause ::= OVER nm */ + -5, /* (336) filter_clause ::= FILTER LP WHERE expr RP */ + -1, /* (337) input ::= cmdlist */ + -2, /* (338) cmdlist ::= cmdlist ecmd */ + -1, /* (339) cmdlist ::= ecmd */ + -1, /* (340) ecmd ::= SEMI */ + -2, /* (341) ecmd ::= cmdx SEMI */ + -3, /* (342) ecmd ::= explain cmdx SEMI */ + 0, /* (343) trans_opt ::= */ + -1, /* (344) trans_opt ::= TRANSACTION */ + -2, /* (345) trans_opt ::= TRANSACTION nm */ + -1, /* (346) savepoint_opt ::= SAVEPOINT */ + 0, /* (347) savepoint_opt ::= */ + -2, /* (348) cmd ::= create_table create_table_args */ + -4, /* (349) columnlist ::= columnlist COMMA columnname carglist */ + -2, /* (350) columnlist ::= columnname carglist */ + -1, /* (351) nm ::= ID|INDEXED */ + -1, /* (352) nm ::= STRING */ + -1, /* (353) nm ::= JOIN_KW */ + -1, /* (354) typetoken ::= typename */ + -1, /* (355) typename ::= ID|STRING */ + -1, /* (356) signed ::= plus_num */ + -1, /* (357) signed ::= minus_num */ + -2, /* (358) carglist ::= carglist ccons */ + 0, /* (359) carglist ::= */ + -2, /* (360) ccons ::= NULL onconf */ + -4, /* (361) ccons ::= GENERATED ALWAYS AS generated */ + -2, /* (362) ccons ::= AS generated */ + -2, /* (363) conslist_opt ::= COMMA conslist */ + -3, /* (364) conslist ::= conslist tconscomma tcons */ + -1, /* (365) conslist ::= tcons */ + 0, /* (366) tconscomma ::= */ + -1, /* (367) defer_subclause_opt ::= defer_subclause */ + -1, /* (368) resolvetype ::= raisetype */ + -1, /* (369) selectnowith ::= oneselect */ + -1, /* (370) oneselect ::= values */ + -2, /* (371) sclp ::= selcollist COMMA */ + -1, /* (372) as ::= ID|STRING */ + 0, /* (373) returning ::= */ + -1, /* (374) expr ::= term */ + -1, /* (375) likeop ::= LIKE_KW|MATCH */ + -1, /* (376) exprlist ::= nexprlist */ + -1, /* (377) nmnum ::= plus_num */ + -1, /* (378) nmnum ::= nm */ + -1, /* (379) nmnum ::= ON */ + -1, /* (380) nmnum ::= DELETE */ + -1, /* (381) nmnum ::= DEFAULT */ + -1, /* (382) plus_num ::= INTEGER|FLOAT */ + 0, /* (383) foreach_clause ::= */ + -3, /* (384) foreach_clause ::= FOR EACH ROW */ + -1, /* (385) trnm ::= nm */ + 0, /* (386) tridxby ::= */ + -1, /* (387) database_kw_opt ::= DATABASE */ + 0, /* (388) database_kw_opt ::= */ + 0, /* (389) kwcolumn_opt ::= */ + -1, /* (390) kwcolumn_opt ::= COLUMNKW */ + -1, /* (391) vtabarglist ::= vtabarg */ + -3, /* (392) vtabarglist ::= vtabarglist COMMA vtabarg */ + -2, /* (393) vtabarg ::= vtabarg vtabargtoken */ + 0, /* (394) anylist ::= */ + -4, /* (395) anylist ::= anylist LP anylist RP */ + -2, /* (396) anylist ::= anylist ANY */ + 0, /* (397) with ::= */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -158281,55 +160508,6 @@ static YYACTIONTYPE yy_reduce( (void)yyLookahead; (void)yyLookaheadToken; yymsp = yypParser->yytos; - assert( yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ); -#ifndef NDEBUG - if( yyTraceFILE ){ - yysize = yyRuleInfoNRhs[yyruleno]; - if( yysize ){ - fprintf(yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n", - yyTracePrompt, - yyruleno, yyRuleName[yyruleno], - yyruleno<YYNRULE_WITH_ACTION ? "" : " without external action", - yymsp[yysize].stateno); - }else{ - fprintf(yyTraceFILE, "%sReduce %d [%s]%s.\n", - yyTracePrompt, yyruleno, yyRuleName[yyruleno], - yyruleno<YYNRULE_WITH_ACTION ? 
"" : " without external action"); - } - } -#endif /* NDEBUG */ - - /* Check that the stack is large enough to grow by a single entry - ** if the RHS of the rule is empty. This ensures that there is room - ** enough on the stack to push the LHS value */ - if( yyRuleInfoNRhs[yyruleno]==0 ){ -#ifdef YYTRACKMAXSTACKDEPTH - if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){ - yypParser->yyhwm++; - assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack)); - } -#endif -#if YYSTACKDEPTH>0 - if( yypParser->yytos>=yypParser->yystackEnd ){ - yyStackOverflow(yypParser); - /* The call to yyStackOverflow() above pops the stack until it is - ** empty, causing the main parser loop to exit. So the return value - ** is never used and does not matter. */ - return 0; - } -#else - if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){ - if( yyGrowStack(yypParser) ){ - yyStackOverflow(yypParser); - /* The call to yyStackOverflow() above pops the stack until it is - ** empty, causing the main parser loop to exit. So the return value - ** is never used and does not matter. */ - return 0; - } - yymsp = yypParser->yytos; - } -#endif - } switch( yyruleno ){ /* Beginning here are the reduction cases. A typical example @@ -158352,16 +160530,16 @@ static YYACTIONTYPE yy_reduce( { sqlite3FinishCoding(pParse); } break; case 3: /* cmd ::= BEGIN transtype trans_opt */ -{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy192);} +{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy60);} break; case 4: /* transtype ::= */ -{yymsp[1].minor.yy192 = TK_DEFERRED;} +{yymsp[1].minor.yy60 = TK_DEFERRED;} break; case 5: /* transtype ::= DEFERRED */ case 6: /* transtype ::= IMMEDIATE */ yytestcase(yyruleno==6); case 7: /* transtype ::= EXCLUSIVE */ yytestcase(yyruleno==7); - case 306: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==306); -{yymsp[0].minor.yy192 = yymsp[0].major; /*A-overwrites-X*/} + case 318: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==318); +{yymsp[0].minor.yy60 = yymsp[0].major; /*A-overwrites-X*/} break; case 8: /* cmd ::= COMMIT|END trans_opt */ case 9: /* cmd ::= ROLLBACK trans_opt */ yytestcase(yyruleno==9); @@ -158384,7 +160562,7 @@ static YYACTIONTYPE yy_reduce( break; case 13: /* create_table ::= createkw temp TABLE ifnotexists nm dbnm */ { - sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy192,0,0,yymsp[-2].minor.yy192); + sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy60,0,0,yymsp[-2].minor.yy60); } break; case 14: /* createkw ::= CREATE */ @@ -158398,33 +160576,33 @@ static YYACTIONTYPE yy_reduce( case 70: /* defer_subclause_opt ::= */ yytestcase(yyruleno==70); case 79: /* ifexists ::= */ yytestcase(yyruleno==79); case 96: /* distinct ::= */ yytestcase(yyruleno==96); - case 232: /* collate ::= */ yytestcase(yyruleno==232); -{yymsp[1].minor.yy192 = 0;} + case 239: /* collate ::= */ yytestcase(yyruleno==239); +{yymsp[1].minor.yy60 = 0;} break; case 16: /* ifnotexists ::= IF NOT EXISTS */ -{yymsp[-2].minor.yy192 = 1;} +{yymsp[-2].minor.yy60 = 1;} break; case 17: /* temp ::= TEMP */ case 46: /* autoinc ::= AUTOINCR */ yytestcase(yyruleno==46); -{yymsp[0].minor.yy192 = 1;} +{yymsp[0].minor.yy60 = 1;} break; case 19: /* create_table_args ::= LP columnlist conslist_opt RP table_options */ { - sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy192,0); + 
sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy60,0); } break; case 20: /* create_table_args ::= AS select */ { - sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy539); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy539); + sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy307); + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy307); } break; case 22: /* table_options ::= WITHOUT nm */ { if( yymsp[0].minor.yy0.n==5 && sqlite3_strnicmp(yymsp[0].minor.yy0.z,"rowid",5)==0 ){ - yymsp[-1].minor.yy192 = TF_WithoutRowid | TF_NoVisibleRowid; + yymsp[-1].minor.yy60 = TF_WithoutRowid | TF_NoVisibleRowid; }else{ - yymsp[-1].minor.yy192 = 0; + yymsp[-1].minor.yy60 = 0; sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z); } } @@ -158453,7 +160631,7 @@ static YYACTIONTYPE yy_reduce( case 28: /* scanpt ::= */ { assert( yyLookahead!=YYNOCODE ); - yymsp[1].minor.yy436 = yyLookaheadToken.z; + yymsp[1].minor.yy528 = yyLookaheadToken.z; } break; case 29: /* scantok ::= */ @@ -158467,17 +160645,17 @@ static YYACTIONTYPE yy_reduce( {pParse->constraintName = yymsp[0].minor.yy0;} break; case 31: /* ccons ::= DEFAULT scantok term */ -{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy202,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} +{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy602,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} break; case 32: /* ccons ::= DEFAULT LP expr RP */ -{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy202,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);} +{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy602,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);} break; case 33: /* ccons ::= DEFAULT PLUS scantok term */ -{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy202,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} +{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy602,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} break; case 34: /* ccons ::= DEFAULT MINUS scantok term */ { - Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy202, 0); + Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy602, 0); sqlite3AddDefaultValue(pParse,p,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]); } break; @@ -158492,176 +160670,158 @@ static YYACTIONTYPE yy_reduce( } break; case 36: /* ccons ::= NOT NULL onconf */ -{sqlite3AddNotNull(pParse, yymsp[0].minor.yy192);} +{sqlite3AddNotNull(pParse, yymsp[0].minor.yy60);} break; case 37: /* ccons ::= PRIMARY KEY sortorder onconf autoinc */ -{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy192,yymsp[0].minor.yy192,yymsp[-2].minor.yy192);} +{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy60,yymsp[0].minor.yy60,yymsp[-2].minor.yy60);} break; case 38: /* ccons ::= UNIQUE onconf */ -{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy192,0,0,0,0, +{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy60,0,0,0,0, SQLITE_IDXTYPE_UNIQUE);} break; case 39: /* ccons ::= CHECK LP expr RP */ -{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy202,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy0.z);} +{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy602,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy0.z);} break; case 40: /* ccons ::= REFERENCES nm eidlist_opt refargs */ -{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy242,yymsp[0].minor.yy192);} +{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy338,yymsp[0].minor.yy60);} break; case 41: /* 
ccons ::= defer_subclause */ -{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy192);} +{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy60);} break; case 42: /* ccons ::= COLLATE ID|STRING */ {sqlite3AddCollateType(pParse, &yymsp[0].minor.yy0);} break; case 43: /* generated ::= LP expr RP */ -{sqlite3AddGenerated(pParse,yymsp[-1].minor.yy202,0);} +{sqlite3AddGenerated(pParse,yymsp[-1].minor.yy602,0);} break; case 44: /* generated ::= LP expr RP ID */ -{sqlite3AddGenerated(pParse,yymsp[-2].minor.yy202,&yymsp[0].minor.yy0);} +{sqlite3AddGenerated(pParse,yymsp[-2].minor.yy602,&yymsp[0].minor.yy0);} break; case 47: /* refargs ::= */ -{ yymsp[1].minor.yy192 = OE_None*0x0101; /* EV: R-19803-45884 */} +{ yymsp[1].minor.yy60 = OE_None*0x0101; /* EV: R-19803-45884 */} break; case 48: /* refargs ::= refargs refarg */ -{ yymsp[-1].minor.yy192 = (yymsp[-1].minor.yy192 & ~yymsp[0].minor.yy207.mask) | yymsp[0].minor.yy207.value; } +{ yymsp[-1].minor.yy60 = (yymsp[-1].minor.yy60 & ~yymsp[0].minor.yy615.mask) | yymsp[0].minor.yy615.value; } break; case 49: /* refarg ::= MATCH nm */ -{ yymsp[-1].minor.yy207.value = 0; yymsp[-1].minor.yy207.mask = 0x000000; } +{ yymsp[-1].minor.yy615.value = 0; yymsp[-1].minor.yy615.mask = 0x000000; } break; case 50: /* refarg ::= ON INSERT refact */ -{ yymsp[-2].minor.yy207.value = 0; yymsp[-2].minor.yy207.mask = 0x000000; } +{ yymsp[-2].minor.yy615.value = 0; yymsp[-2].minor.yy615.mask = 0x000000; } break; case 51: /* refarg ::= ON DELETE refact */ -{ yymsp[-2].minor.yy207.value = yymsp[0].minor.yy192; yymsp[-2].minor.yy207.mask = 0x0000ff; } +{ yymsp[-2].minor.yy615.value = yymsp[0].minor.yy60; yymsp[-2].minor.yy615.mask = 0x0000ff; } break; case 52: /* refarg ::= ON UPDATE refact */ -{ yymsp[-2].minor.yy207.value = yymsp[0].minor.yy192<<8; yymsp[-2].minor.yy207.mask = 0x00ff00; } +{ yymsp[-2].minor.yy615.value = yymsp[0].minor.yy60<<8; yymsp[-2].minor.yy615.mask = 0x00ff00; } break; case 53: /* refact ::= SET NULL */ -{ yymsp[-1].minor.yy192 = OE_SetNull; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy60 = OE_SetNull; /* EV: R-33326-45252 */} break; case 54: /* refact ::= SET DEFAULT */ -{ yymsp[-1].minor.yy192 = OE_SetDflt; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy60 = OE_SetDflt; /* EV: R-33326-45252 */} break; case 55: /* refact ::= CASCADE */ -{ yymsp[0].minor.yy192 = OE_Cascade; /* EV: R-33326-45252 */} +{ yymsp[0].minor.yy60 = OE_Cascade; /* EV: R-33326-45252 */} break; case 56: /* refact ::= RESTRICT */ -{ yymsp[0].minor.yy192 = OE_Restrict; /* EV: R-33326-45252 */} +{ yymsp[0].minor.yy60 = OE_Restrict; /* EV: R-33326-45252 */} break; case 57: /* refact ::= NO ACTION */ -{ yymsp[-1].minor.yy192 = OE_None; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy60 = OE_None; /* EV: R-33326-45252 */} break; case 58: /* defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ -{yymsp[-2].minor.yy192 = 0;} +{yymsp[-2].minor.yy60 = 0;} break; case 59: /* defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ case 74: /* orconf ::= OR resolvetype */ yytestcase(yyruleno==74); - case 162: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==162); -{yymsp[-1].minor.yy192 = yymsp[0].minor.yy192;} + case 169: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==169); +{yymsp[-1].minor.yy60 = yymsp[0].minor.yy60;} break; case 61: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */ case 78: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==78); - case 204: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==204); - case 207: /* in_op ::= NOT IN */ 
yytestcase(yyruleno==207); - case 233: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==233); -{yymsp[-1].minor.yy192 = 1;} + case 211: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==211); + case 214: /* in_op ::= NOT IN */ yytestcase(yyruleno==214); + case 240: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==240); +{yymsp[-1].minor.yy60 = 1;} break; case 62: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ -{yymsp[-1].minor.yy192 = 0;} +{yymsp[-1].minor.yy60 = 0;} break; case 64: /* tconscomma ::= COMMA */ {pParse->constraintName.n = 0;} break; case 66: /* tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ -{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy242,yymsp[0].minor.yy192,yymsp[-2].minor.yy192,0);} +{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy338,yymsp[0].minor.yy60,yymsp[-2].minor.yy60,0);} break; case 67: /* tcons ::= UNIQUE LP sortlist RP onconf */ -{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy242,yymsp[0].minor.yy192,0,0,0,0, +{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy338,yymsp[0].minor.yy60,0,0,0,0, SQLITE_IDXTYPE_UNIQUE);} break; case 68: /* tcons ::= CHECK LP expr RP onconf */ -{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy202,yymsp[-3].minor.yy0.z,yymsp[-1].minor.yy0.z);} +{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy602,yymsp[-3].minor.yy0.z,yymsp[-1].minor.yy0.z);} break; case 69: /* tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ { - sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy242, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy242, yymsp[-1].minor.yy192); - sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy192); + sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy338, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy338, yymsp[-1].minor.yy60); + sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy60); } break; case 71: /* onconf ::= */ case 73: /* orconf ::= */ yytestcase(yyruleno==73); -{yymsp[1].minor.yy192 = OE_Default;} +{yymsp[1].minor.yy60 = OE_Default;} break; case 72: /* onconf ::= ON CONFLICT resolvetype */ -{yymsp[-2].minor.yy192 = yymsp[0].minor.yy192;} +{yymsp[-2].minor.yy60 = yymsp[0].minor.yy60;} break; case 75: /* resolvetype ::= IGNORE */ -{yymsp[0].minor.yy192 = OE_Ignore;} +{yymsp[0].minor.yy60 = OE_Ignore;} break; case 76: /* resolvetype ::= REPLACE */ - case 163: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==163); -{yymsp[0].minor.yy192 = OE_Replace;} + case 170: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==170); +{yymsp[0].minor.yy60 = OE_Replace;} break; case 77: /* cmd ::= DROP TABLE ifexists fullname */ { - sqlite3DropTable(pParse, yymsp[0].minor.yy47, 0, yymsp[-1].minor.yy192); + sqlite3DropTable(pParse, yymsp[0].minor.yy291, 0, yymsp[-1].minor.yy60); } break; case 80: /* cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ { - sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy242, yymsp[0].minor.yy539, yymsp[-7].minor.yy192, yymsp[-5].minor.yy192); + sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy338, yymsp[0].minor.yy307, yymsp[-7].minor.yy60, yymsp[-5].minor.yy60); } break; case 81: /* cmd ::= DROP VIEW ifexists fullname */ { - sqlite3DropTable(pParse, yymsp[0].minor.yy47, 1, yymsp[-1].minor.yy192); + sqlite3DropTable(pParse, yymsp[0].minor.yy291, 1, yymsp[-1].minor.yy60); } break; case 82: /* cmd ::= select */ { SelectDest dest = {SRT_Output, 0, 0, 0, 0, 0, 0}; - sqlite3Select(pParse, 
yymsp[0].minor.yy539, &dest); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy539); + sqlite3Select(pParse, yymsp[0].minor.yy307, &dest); + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy307); } break; case 83: /* select ::= WITH wqlist selectnowith */ -{ - Select *p = yymsp[0].minor.yy539; - if( p ){ - p->pWith = yymsp[-1].minor.yy131; - parserDoubleLinkSelect(pParse, p); - }else{ - sqlite3WithDelete(pParse->db, yymsp[-1].minor.yy131); - } - yymsp[-2].minor.yy539 = p; -} +{yymsp[-2].minor.yy307 = attachWithToSelect(pParse,yymsp[0].minor.yy307,yymsp[-1].minor.yy195);} break; case 84: /* select ::= WITH RECURSIVE wqlist selectnowith */ -{ - Select *p = yymsp[0].minor.yy539; - if( p ){ - p->pWith = yymsp[-1].minor.yy131; - parserDoubleLinkSelect(pParse, p); - }else{ - sqlite3WithDelete(pParse->db, yymsp[-1].minor.yy131); - } - yymsp[-3].minor.yy539 = p; -} +{yymsp[-3].minor.yy307 = attachWithToSelect(pParse,yymsp[0].minor.yy307,yymsp[-1].minor.yy195);} break; case 85: /* select ::= selectnowith */ { - Select *p = yymsp[0].minor.yy539; + Select *p = yymsp[0].minor.yy307; if( p ){ parserDoubleLinkSelect(pParse, p); } - yymsp[0].minor.yy539 = p; /*A-overwrites-X*/ + yymsp[0].minor.yy307 = p; /*A-overwrites-X*/ } break; case 86: /* selectnowith ::= selectnowith multiselect_op oneselect */ { - Select *pRhs = yymsp[0].minor.yy539; - Select *pLhs = yymsp[-2].minor.yy539; + Select *pRhs = yymsp[0].minor.yy307; + Select *pLhs = yymsp[-2].minor.yy307; if( pRhs && pRhs->pPrior ){ SrcList *pFrom; Token x; @@ -158671,83 +160831,83 @@ static YYACTIONTYPE yy_reduce( pRhs = sqlite3SelectNew(pParse,0,pFrom,0,0,0,0,0,0); } if( pRhs ){ - pRhs->op = (u8)yymsp[-1].minor.yy192; + pRhs->op = (u8)yymsp[-1].minor.yy60; pRhs->pPrior = pLhs; if( ALWAYS(pLhs) ) pLhs->selFlags &= ~SF_MultiValue; pRhs->selFlags &= ~SF_MultiValue; - if( yymsp[-1].minor.yy192!=TK_ALL ) pParse->hasCompound = 1; + if( yymsp[-1].minor.yy60!=TK_ALL ) pParse->hasCompound = 1; }else{ sqlite3SelectDelete(pParse->db, pLhs); } - yymsp[-2].minor.yy539 = pRhs; + yymsp[-2].minor.yy307 = pRhs; } break; case 87: /* multiselect_op ::= UNION */ case 89: /* multiselect_op ::= EXCEPT|INTERSECT */ yytestcase(yyruleno==89); -{yymsp[0].minor.yy192 = yymsp[0].major; /*A-overwrites-OP*/} +{yymsp[0].minor.yy60 = yymsp[0].major; /*A-overwrites-OP*/} break; case 88: /* multiselect_op ::= UNION ALL */ -{yymsp[-1].minor.yy192 = TK_ALL;} +{yymsp[-1].minor.yy60 = TK_ALL;} break; case 90: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ { - yymsp[-8].minor.yy539 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy242,yymsp[-5].minor.yy47,yymsp[-4].minor.yy202,yymsp[-3].minor.yy242,yymsp[-2].minor.yy202,yymsp[-1].minor.yy242,yymsp[-7].minor.yy192,yymsp[0].minor.yy202); + yymsp[-8].minor.yy307 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy338,yymsp[-5].minor.yy291,yymsp[-4].minor.yy602,yymsp[-3].minor.yy338,yymsp[-2].minor.yy602,yymsp[-1].minor.yy338,yymsp[-7].minor.yy60,yymsp[0].minor.yy602); } break; case 91: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ { - yymsp[-9].minor.yy539 = sqlite3SelectNew(pParse,yymsp[-7].minor.yy242,yymsp[-6].minor.yy47,yymsp[-5].minor.yy202,yymsp[-4].minor.yy242,yymsp[-3].minor.yy202,yymsp[-1].minor.yy242,yymsp[-8].minor.yy192,yymsp[0].minor.yy202); - if( yymsp[-9].minor.yy539 ){ - yymsp[-9].minor.yy539->pWinDefn = yymsp[-2].minor.yy303; + yymsp[-9].minor.yy307 = 
sqlite3SelectNew(pParse,yymsp[-7].minor.yy338,yymsp[-6].minor.yy291,yymsp[-5].minor.yy602,yymsp[-4].minor.yy338,yymsp[-3].minor.yy602,yymsp[-1].minor.yy338,yymsp[-8].minor.yy60,yymsp[0].minor.yy602); + if( yymsp[-9].minor.yy307 ){ + yymsp[-9].minor.yy307->pWinDefn = yymsp[-2].minor.yy19; }else{ - sqlite3WindowListDelete(pParse->db, yymsp[-2].minor.yy303); + sqlite3WindowListDelete(pParse->db, yymsp[-2].minor.yy19); } } break; case 92: /* values ::= VALUES LP nexprlist RP */ { - yymsp[-3].minor.yy539 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy242,0,0,0,0,0,SF_Values,0); + yymsp[-3].minor.yy307 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy338,0,0,0,0,0,SF_Values,0); } break; case 93: /* values ::= values COMMA LP nexprlist RP */ { - Select *pRight, *pLeft = yymsp[-4].minor.yy539; - pRight = sqlite3SelectNew(pParse,yymsp[-1].minor.yy242,0,0,0,0,0,SF_Values|SF_MultiValue,0); + Select *pRight, *pLeft = yymsp[-4].minor.yy307; + pRight = sqlite3SelectNew(pParse,yymsp[-1].minor.yy338,0,0,0,0,0,SF_Values|SF_MultiValue,0); if( ALWAYS(pLeft) ) pLeft->selFlags &= ~SF_MultiValue; if( pRight ){ pRight->op = TK_ALL; pRight->pPrior = pLeft; - yymsp[-4].minor.yy539 = pRight; + yymsp[-4].minor.yy307 = pRight; }else{ - yymsp[-4].minor.yy539 = pLeft; + yymsp[-4].minor.yy307 = pLeft; } } break; case 94: /* distinct ::= DISTINCT */ -{yymsp[0].minor.yy192 = SF_Distinct;} +{yymsp[0].minor.yy60 = SF_Distinct;} break; case 95: /* distinct ::= ALL */ -{yymsp[0].minor.yy192 = SF_All;} +{yymsp[0].minor.yy60 = SF_All;} break; case 97: /* sclp ::= */ case 130: /* orderby_opt ::= */ yytestcase(yyruleno==130); case 140: /* groupby_opt ::= */ yytestcase(yyruleno==140); - case 220: /* exprlist ::= */ yytestcase(yyruleno==220); - case 223: /* paren_exprlist ::= */ yytestcase(yyruleno==223); - case 228: /* eidlist_opt ::= */ yytestcase(yyruleno==228); -{yymsp[1].minor.yy242 = 0;} + case 227: /* exprlist ::= */ yytestcase(yyruleno==227); + case 230: /* paren_exprlist ::= */ yytestcase(yyruleno==230); + case 235: /* eidlist_opt ::= */ yytestcase(yyruleno==235); +{yymsp[1].minor.yy338 = 0;} break; case 98: /* selcollist ::= sclp scanpt expr scanpt as */ { - yymsp[-4].minor.yy242 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy242, yymsp[-2].minor.yy202); - if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy242, &yymsp[0].minor.yy0, 1); - sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy242,yymsp[-3].minor.yy436,yymsp[-1].minor.yy436); + yymsp[-4].minor.yy338 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy338, yymsp[-2].minor.yy602); + if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy338, &yymsp[0].minor.yy0, 1); + sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy338,yymsp[-3].minor.yy528,yymsp[-1].minor.yy528); } break; case 99: /* selcollist ::= sclp scanpt STAR */ { Expr *p = sqlite3Expr(pParse->db, TK_ASTERISK, 0); - yymsp[-2].minor.yy242 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy242, p); + yymsp[-2].minor.yy338 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy338, p); } break; case 100: /* selcollist ::= sclp scanpt nm DOT STAR */ @@ -158755,56 +160915,56 @@ static YYACTIONTYPE yy_reduce( Expr *pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0); Expr *pLeft = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1); Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight); - yymsp[-4].minor.yy242 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy242, pDot); + yymsp[-4].minor.yy338 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy338, pDot); } break; case 
101: /* as ::= AS nm */ case 112: /* dbnm ::= DOT nm */ yytestcase(yyruleno==112); - case 244: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==244); - case 245: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==245); + case 251: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==251); + case 252: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==252); {yymsp[-1].minor.yy0 = yymsp[0].minor.yy0;} break; case 103: /* from ::= */ case 106: /* stl_prefix ::= */ yytestcase(yyruleno==106); -{yymsp[1].minor.yy47 = 0;} +{yymsp[1].minor.yy291 = 0;} break; case 104: /* from ::= FROM seltablist */ { - yymsp[-1].minor.yy47 = yymsp[0].minor.yy47; - sqlite3SrcListShiftJoinType(yymsp[-1].minor.yy47); + yymsp[-1].minor.yy291 = yymsp[0].minor.yy291; + sqlite3SrcListShiftJoinType(yymsp[-1].minor.yy291); } break; case 105: /* stl_prefix ::= seltablist joinop */ { - if( ALWAYS(yymsp[-1].minor.yy47 && yymsp[-1].minor.yy47->nSrc>0) ) yymsp[-1].minor.yy47->a[yymsp[-1].minor.yy47->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy192; + if( ALWAYS(yymsp[-1].minor.yy291 && yymsp[-1].minor.yy291->nSrc>0) ) yymsp[-1].minor.yy291->a[yymsp[-1].minor.yy291->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy60; } break; case 107: /* seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */ { - yymsp[-6].minor.yy47 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy47,&yymsp[-5].minor.yy0,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,0,yymsp[-1].minor.yy202,yymsp[0].minor.yy600); - sqlite3SrcListIndexedBy(pParse, yymsp[-6].minor.yy47, &yymsp[-2].minor.yy0); + yymsp[-6].minor.yy291 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy291,&yymsp[-5].minor.yy0,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,0,yymsp[-1].minor.yy602,yymsp[0].minor.yy288); + sqlite3SrcListIndexedBy(pParse, yymsp[-6].minor.yy291, &yymsp[-2].minor.yy0); } break; case 108: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */ { - yymsp[-8].minor.yy47 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-8].minor.yy47,&yymsp[-7].minor.yy0,&yymsp[-6].minor.yy0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy202,yymsp[0].minor.yy600); - sqlite3SrcListFuncArgs(pParse, yymsp[-8].minor.yy47, yymsp[-4].minor.yy242); + yymsp[-8].minor.yy291 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-8].minor.yy291,&yymsp[-7].minor.yy0,&yymsp[-6].minor.yy0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy602,yymsp[0].minor.yy288); + sqlite3SrcListFuncArgs(pParse, yymsp[-8].minor.yy291, yymsp[-4].minor.yy338); } break; case 109: /* seltablist ::= stl_prefix LP select RP as on_opt using_opt */ { - yymsp[-6].minor.yy47 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy47,0,0,&yymsp[-2].minor.yy0,yymsp[-4].minor.yy539,yymsp[-1].minor.yy202,yymsp[0].minor.yy600); + yymsp[-6].minor.yy291 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy291,0,0,&yymsp[-2].minor.yy0,yymsp[-4].minor.yy307,yymsp[-1].minor.yy602,yymsp[0].minor.yy288); } break; case 110: /* seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */ { - if( yymsp[-6].minor.yy47==0 && yymsp[-2].minor.yy0.n==0 && yymsp[-1].minor.yy202==0 && yymsp[0].minor.yy600==0 ){ - yymsp[-6].minor.yy47 = yymsp[-4].minor.yy47; - }else if( yymsp[-4].minor.yy47->nSrc==1 ){ - yymsp[-6].minor.yy47 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy47,0,0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy202,yymsp[0].minor.yy600); - if( yymsp[-6].minor.yy47 ){ - struct SrcList_item *pNew = &yymsp[-6].minor.yy47->a[yymsp[-6].minor.yy47->nSrc-1]; - struct SrcList_item *pOld = 
yymsp[-4].minor.yy47->a; + if( yymsp[-6].minor.yy291==0 && yymsp[-2].minor.yy0.n==0 && yymsp[-1].minor.yy602==0 && yymsp[0].minor.yy288==0 ){ + yymsp[-6].minor.yy291 = yymsp[-4].minor.yy291; + }else if( yymsp[-4].minor.yy291->nSrc==1 ){ + yymsp[-6].minor.yy291 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy291,0,0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy602,yymsp[0].minor.yy288); + if( yymsp[-6].minor.yy291 ){ + SrcItem *pNew = &yymsp[-6].minor.yy291->a[yymsp[-6].minor.yy291->nSrc-1]; + SrcItem *pOld = yymsp[-4].minor.yy291->a; pNew->zName = pOld->zName; pNew->zDatabase = pOld->zDatabase; pNew->pSelect = pOld->pSelect; @@ -158817,12 +160977,12 @@ static YYACTIONTYPE yy_reduce( pOld->zName = pOld->zDatabase = 0; pOld->pSelect = 0; } - sqlite3SrcListDelete(pParse->db, yymsp[-4].minor.yy47); + sqlite3SrcListDelete(pParse->db, yymsp[-4].minor.yy291); }else{ Select *pSubquery; - sqlite3SrcListShiftJoinType(yymsp[-4].minor.yy47); - pSubquery = sqlite3SelectNew(pParse,0,yymsp[-4].minor.yy47,0,0,0,0,SF_NestedFrom,0); - yymsp[-6].minor.yy47 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy47,0,0,&yymsp[-2].minor.yy0,pSubquery,yymsp[-1].minor.yy202,yymsp[0].minor.yy600); + sqlite3SrcListShiftJoinType(yymsp[-4].minor.yy291); + pSubquery = sqlite3SelectNew(pParse,0,yymsp[-4].minor.yy291,0,0,0,0,SF_NestedFrom,0); + yymsp[-6].minor.yy291 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy291,0,0,&yymsp[-2].minor.yy0,pSubquery,yymsp[-1].minor.yy602,yymsp[0].minor.yy288); } } break; @@ -158832,63 +160992,65 @@ static YYACTIONTYPE yy_reduce( break; case 113: /* fullname ::= nm */ { - yylhsminor.yy47 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); - if( IN_RENAME_OBJECT && yylhsminor.yy47 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy47->a[0].zName, &yymsp[0].minor.yy0); + yylhsminor.yy291 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); + if( IN_RENAME_OBJECT && yylhsminor.yy291 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy291->a[0].zName, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy47 = yylhsminor.yy47; + yymsp[0].minor.yy291 = yylhsminor.yy291; break; case 114: /* fullname ::= nm DOT nm */ { - yylhsminor.yy47 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); - if( IN_RENAME_OBJECT && yylhsminor.yy47 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy47->a[0].zName, &yymsp[0].minor.yy0); + yylhsminor.yy291 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); + if( IN_RENAME_OBJECT && yylhsminor.yy291 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy291->a[0].zName, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy47 = yylhsminor.yy47; + yymsp[-2].minor.yy291 = yylhsminor.yy291; break; case 115: /* xfullname ::= nm */ -{yymsp[0].minor.yy47 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/} +{yymsp[0].minor.yy291 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/} break; case 116: /* xfullname ::= nm DOT nm */ -{yymsp[-2].minor.yy47 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/} +{yymsp[-2].minor.yy291 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/} break; case 117: /* xfullname ::= nm DOT nm AS nm */ { - yymsp[-4].minor.yy47 = sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/ - if( yymsp[-4].minor.yy47 ) yymsp[-4].minor.yy47->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); + yymsp[-4].minor.yy291 = 
sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/ + if( yymsp[-4].minor.yy291 ) yymsp[-4].minor.yy291->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); } break; case 118: /* xfullname ::= nm AS nm */ { - yymsp[-2].minor.yy47 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/ - if( yymsp[-2].minor.yy47 ) yymsp[-2].minor.yy47->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); + yymsp[-2].minor.yy291 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/ + if( yymsp[-2].minor.yy291 ) yymsp[-2].minor.yy291->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); } break; case 119: /* joinop ::= COMMA|JOIN */ -{ yymsp[0].minor.yy192 = JT_INNER; } +{ yymsp[0].minor.yy60 = JT_INNER; } break; case 120: /* joinop ::= JOIN_KW JOIN */ -{yymsp[-1].minor.yy192 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/} +{yymsp[-1].minor.yy60 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/} break; case 121: /* joinop ::= JOIN_KW nm JOIN */ -{yymsp[-2].minor.yy192 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/} +{yymsp[-2].minor.yy60 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/} break; case 122: /* joinop ::= JOIN_KW nm nm JOIN */ -{yymsp[-3].minor.yy192 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/} +{yymsp[-3].minor.yy60 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/} break; case 123: /* on_opt ::= ON expr */ case 143: /* having_opt ::= HAVING expr */ yytestcase(yyruleno==143); case 150: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==150); - case 216: /* case_else ::= ELSE expr */ yytestcase(yyruleno==216); - case 237: /* vinto ::= INTO expr */ yytestcase(yyruleno==237); -{yymsp[-1].minor.yy202 = yymsp[0].minor.yy202;} + case 152: /* where_opt_ret ::= WHERE expr */ yytestcase(yyruleno==152); + case 223: /* case_else ::= ELSE expr */ yytestcase(yyruleno==223); + case 244: /* vinto ::= INTO expr */ yytestcase(yyruleno==244); +{yymsp[-1].minor.yy602 = yymsp[0].minor.yy602;} break; case 124: /* on_opt ::= */ case 142: /* having_opt ::= */ yytestcase(yyruleno==142); case 144: /* limit_opt ::= */ yytestcase(yyruleno==144); case 149: /* where_opt ::= */ yytestcase(yyruleno==149); - case 217: /* case_else ::= */ yytestcase(yyruleno==217); - case 219: /* case_operand ::= */ yytestcase(yyruleno==219); - case 238: /* vinto ::= */ yytestcase(yyruleno==238); -{yymsp[1].minor.yy202 = 0;} + case 151: /* where_opt_ret ::= */ yytestcase(yyruleno==151); + case 224: /* case_else ::= */ yytestcase(yyruleno==224); + case 226: /* case_operand ::= */ yytestcase(yyruleno==226); + case 245: /* vinto ::= */ yytestcase(yyruleno==245); +{yymsp[1].minor.yy602 = 0;} break; case 126: /* indexed_opt ::= INDEXED BY nm */ {yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;} @@ -158897,129 +161059,144 @@ static YYACTIONTYPE yy_reduce( {yymsp[-1].minor.yy0.z=0; yymsp[-1].minor.yy0.n=1;} break; case 128: /* using_opt ::= USING LP idlist RP */ -{yymsp[-3].minor.yy600 = yymsp[-1].minor.yy600;} +{yymsp[-3].minor.yy288 = yymsp[-1].minor.yy288;} break; case 129: /* using_opt ::= */ - case 164: /* idlist_opt ::= */ yytestcase(yyruleno==164); -{yymsp[1].minor.yy600 = 0;} + case 171: /* idlist_opt ::= */ yytestcase(yyruleno==171); +{yymsp[1].minor.yy288 = 0;} break; case 
131: /* orderby_opt ::= ORDER BY sortlist */ case 141: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==141); -{yymsp[-2].minor.yy242 = yymsp[0].minor.yy242;} +{yymsp[-2].minor.yy338 = yymsp[0].minor.yy338;} break; case 132: /* sortlist ::= sortlist COMMA expr sortorder nulls */ { - yymsp[-4].minor.yy242 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy242,yymsp[-2].minor.yy202); - sqlite3ExprListSetSortOrder(yymsp[-4].minor.yy242,yymsp[-1].minor.yy192,yymsp[0].minor.yy192); + yymsp[-4].minor.yy338 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy338,yymsp[-2].minor.yy602); + sqlite3ExprListSetSortOrder(yymsp[-4].minor.yy338,yymsp[-1].minor.yy60,yymsp[0].minor.yy60); } break; case 133: /* sortlist ::= expr sortorder nulls */ { - yymsp[-2].minor.yy242 = sqlite3ExprListAppend(pParse,0,yymsp[-2].minor.yy202); /*A-overwrites-Y*/ - sqlite3ExprListSetSortOrder(yymsp[-2].minor.yy242,yymsp[-1].minor.yy192,yymsp[0].minor.yy192); + yymsp[-2].minor.yy338 = sqlite3ExprListAppend(pParse,0,yymsp[-2].minor.yy602); /*A-overwrites-Y*/ + sqlite3ExprListSetSortOrder(yymsp[-2].minor.yy338,yymsp[-1].minor.yy60,yymsp[0].minor.yy60); } break; case 134: /* sortorder ::= ASC */ -{yymsp[0].minor.yy192 = SQLITE_SO_ASC;} +{yymsp[0].minor.yy60 = SQLITE_SO_ASC;} break; case 135: /* sortorder ::= DESC */ -{yymsp[0].minor.yy192 = SQLITE_SO_DESC;} +{yymsp[0].minor.yy60 = SQLITE_SO_DESC;} break; case 136: /* sortorder ::= */ case 139: /* nulls ::= */ yytestcase(yyruleno==139); -{yymsp[1].minor.yy192 = SQLITE_SO_UNDEFINED;} +{yymsp[1].minor.yy60 = SQLITE_SO_UNDEFINED;} break; case 137: /* nulls ::= NULLS FIRST */ -{yymsp[-1].minor.yy192 = SQLITE_SO_ASC;} +{yymsp[-1].minor.yy60 = SQLITE_SO_ASC;} break; case 138: /* nulls ::= NULLS LAST */ -{yymsp[-1].minor.yy192 = SQLITE_SO_DESC;} +{yymsp[-1].minor.yy60 = SQLITE_SO_DESC;} break; case 145: /* limit_opt ::= LIMIT expr */ -{yymsp[-1].minor.yy202 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy202,0);} +{yymsp[-1].minor.yy602 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy602,0);} break; case 146: /* limit_opt ::= LIMIT expr OFFSET expr */ -{yymsp[-3].minor.yy202 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy202,yymsp[0].minor.yy202);} +{yymsp[-3].minor.yy602 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy602,yymsp[0].minor.yy602);} break; case 147: /* limit_opt ::= LIMIT expr COMMA expr */ -{yymsp[-3].minor.yy202 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy202,yymsp[-2].minor.yy202);} +{yymsp[-3].minor.yy602 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy602,yymsp[-2].minor.yy602);} break; - case 148: /* cmd ::= with DELETE FROM xfullname indexed_opt where_opt */ + case 148: /* cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret */ { - sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy47, &yymsp[-1].minor.yy0); - sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy47,yymsp[0].minor.yy202,0,0); + sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy291, &yymsp[-1].minor.yy0); + sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy291,yymsp[0].minor.yy602,0,0); } break; - case 151: /* cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt */ + case 153: /* where_opt_ret ::= RETURNING selcollist */ +{sqlite3AddReturning(pParse,yymsp[0].minor.yy338); yymsp[-1].minor.yy602 = 0;} + break; + case 154: /* where_opt_ret ::= WHERE expr RETURNING selcollist */ +{sqlite3AddReturning(pParse,yymsp[0].minor.yy338); yymsp[-3].minor.yy602 = yymsp[-2].minor.yy602;} + break; + case 155: /* cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from 
where_opt_ret */ { - sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy47, &yymsp[-4].minor.yy0); - sqlite3ExprListCheckLength(pParse,yymsp[-2].minor.yy242,"set list"); - yymsp[-5].minor.yy47 = sqlite3SrcListAppendList(pParse, yymsp[-5].minor.yy47, yymsp[-1].minor.yy47); - sqlite3Update(pParse,yymsp[-5].minor.yy47,yymsp[-2].minor.yy242,yymsp[0].minor.yy202,yymsp[-6].minor.yy192,0,0,0); + sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy291, &yymsp[-4].minor.yy0); + sqlite3ExprListCheckLength(pParse,yymsp[-2].minor.yy338,"set list"); + yymsp[-5].minor.yy291 = sqlite3SrcListAppendList(pParse, yymsp[-5].minor.yy291, yymsp[-1].minor.yy291); + sqlite3Update(pParse,yymsp[-5].minor.yy291,yymsp[-2].minor.yy338,yymsp[0].minor.yy602,yymsp[-6].minor.yy60,0,0,0); } break; - case 152: /* setlist ::= setlist COMMA nm EQ expr */ + case 156: /* setlist ::= setlist COMMA nm EQ expr */ { - yymsp[-4].minor.yy242 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy242, yymsp[0].minor.yy202); - sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy242, &yymsp[-2].minor.yy0, 1); + yymsp[-4].minor.yy338 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy338, yymsp[0].minor.yy602); + sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy338, &yymsp[-2].minor.yy0, 1); } break; - case 153: /* setlist ::= setlist COMMA LP idlist RP EQ expr */ + case 157: /* setlist ::= setlist COMMA LP idlist RP EQ expr */ { - yymsp[-6].minor.yy242 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy242, yymsp[-3].minor.yy600, yymsp[0].minor.yy202); + yymsp[-6].minor.yy338 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy338, yymsp[-3].minor.yy288, yymsp[0].minor.yy602); } break; - case 154: /* setlist ::= nm EQ expr */ + case 158: /* setlist ::= nm EQ expr */ { - yylhsminor.yy242 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy202); - sqlite3ExprListSetName(pParse, yylhsminor.yy242, &yymsp[-2].minor.yy0, 1); + yylhsminor.yy338 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy602); + sqlite3ExprListSetName(pParse, yylhsminor.yy338, &yymsp[-2].minor.yy0, 1); } - yymsp[-2].minor.yy242 = yylhsminor.yy242; + yymsp[-2].minor.yy338 = yylhsminor.yy338; break; - case 155: /* setlist ::= LP idlist RP EQ expr */ + case 159: /* setlist ::= LP idlist RP EQ expr */ { - yymsp[-4].minor.yy242 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy600, yymsp[0].minor.yy202); + yymsp[-4].minor.yy338 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy288, yymsp[0].minor.yy602); } break; - case 156: /* cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ + case 160: /* cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ { - sqlite3Insert(pParse, yymsp[-3].minor.yy47, yymsp[-1].minor.yy539, yymsp[-2].minor.yy600, yymsp[-5].minor.yy192, yymsp[0].minor.yy318); + sqlite3Insert(pParse, yymsp[-3].minor.yy291, yymsp[-1].minor.yy307, yymsp[-2].minor.yy288, yymsp[-5].minor.yy60, yymsp[0].minor.yy178); } break; - case 157: /* cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES */ + case 161: /* cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ { - sqlite3Insert(pParse, yymsp[-3].minor.yy47, 0, yymsp[-2].minor.yy600, yymsp[-5].minor.yy192, 0); + sqlite3Insert(pParse, yymsp[-4].minor.yy291, 0, yymsp[-3].minor.yy288, yymsp[-6].minor.yy60, 0); } break; - case 158: /* upsert ::= */ -{ yymsp[1].minor.yy318 = 0; } + case 162: /* upsert ::= */ +{ yymsp[1].minor.yy178 = 0; } break; - case 159: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt */ -{ 
yymsp[-10].minor.yy318 = sqlite3UpsertNew(pParse->db,yymsp[-7].minor.yy242,yymsp[-5].minor.yy202,yymsp[-1].minor.yy242,yymsp[0].minor.yy202);} + case 163: /* upsert ::= RETURNING selcollist */ +{ yymsp[-1].minor.yy178 = 0; sqlite3AddReturning(pParse,yymsp[0].minor.yy338); } break; - case 160: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING */ -{ yymsp[-7].minor.yy318 = sqlite3UpsertNew(pParse->db,yymsp[-4].minor.yy242,yymsp[-2].minor.yy202,0,0); } + case 164: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ +{ yymsp[-11].minor.yy178 = sqlite3UpsertNew(pParse->db,yymsp[-8].minor.yy338,yymsp[-6].minor.yy602,yymsp[-2].minor.yy338,yymsp[-1].minor.yy602,yymsp[0].minor.yy178);} break; - case 161: /* upsert ::= ON CONFLICT DO NOTHING */ -{ yymsp[-3].minor.yy318 = sqlite3UpsertNew(pParse->db,0,0,0,0); } + case 165: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ +{ yymsp[-8].minor.yy178 = sqlite3UpsertNew(pParse->db,yymsp[-5].minor.yy338,yymsp[-3].minor.yy602,0,0,yymsp[0].minor.yy178); } break; - case 165: /* idlist_opt ::= LP idlist RP */ -{yymsp[-2].minor.yy600 = yymsp[-1].minor.yy600;} + case 166: /* upsert ::= ON CONFLICT DO NOTHING returning */ +{ yymsp[-4].minor.yy178 = sqlite3UpsertNew(pParse->db,0,0,0,0,0); } break; - case 166: /* idlist ::= idlist COMMA nm */ -{yymsp[-2].minor.yy600 = sqlite3IdListAppend(pParse,yymsp[-2].minor.yy600,&yymsp[0].minor.yy0);} + case 167: /* upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ +{ yymsp[-7].minor.yy178 = sqlite3UpsertNew(pParse->db,0,0,yymsp[-2].minor.yy338,yymsp[-1].minor.yy602,0);} break; - case 167: /* idlist ::= nm */ -{yymsp[0].minor.yy600 = sqlite3IdListAppend(pParse,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/} + case 168: /* returning ::= RETURNING selcollist */ +{sqlite3AddReturning(pParse,yymsp[0].minor.yy338);} break; - case 168: /* expr ::= LP expr RP */ -{yymsp[-2].minor.yy202 = yymsp[-1].minor.yy202;} + case 172: /* idlist_opt ::= LP idlist RP */ +{yymsp[-2].minor.yy288 = yymsp[-1].minor.yy288;} break; - case 169: /* expr ::= ID|INDEXED */ - case 170: /* expr ::= JOIN_KW */ yytestcase(yyruleno==170); -{yymsp[0].minor.yy202=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} + case 173: /* idlist ::= idlist COMMA nm */ +{yymsp[-2].minor.yy288 = sqlite3IdListAppend(pParse,yymsp[-2].minor.yy288,&yymsp[0].minor.yy0);} break; - case 171: /* expr ::= nm DOT nm */ + case 174: /* idlist ::= nm */ +{yymsp[0].minor.yy288 = sqlite3IdListAppend(pParse,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/} + break; + case 175: /* expr ::= LP expr RP */ +{yymsp[-2].minor.yy602 = yymsp[-1].minor.yy602;} + break; + case 176: /* expr ::= ID|INDEXED */ + case 177: /* expr ::= JOIN_KW */ yytestcase(yyruleno==177); +{yymsp[0].minor.yy602=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} + break; + case 178: /* expr ::= nm DOT nm */ { Expr *temp1 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1); Expr *temp2 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[0].minor.yy0, 1); @@ -159027,11 +161204,11 @@ static YYACTIONTYPE yy_reduce( sqlite3RenameTokenMap(pParse, (void*)temp2, &yymsp[0].minor.yy0); sqlite3RenameTokenMap(pParse, (void*)temp1, &yymsp[-2].minor.yy0); } - yylhsminor.yy202 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2); + yylhsminor.yy602 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2); } - yymsp[-2].minor.yy202 = yylhsminor.yy202; + yymsp[-2].minor.yy602 = yylhsminor.yy602; break; - case 172: /* expr ::= nm DOT nm DOT nm */ + 
case 179: /* expr ::= nm DOT nm DOT nm */ { Expr *temp1 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-4].minor.yy0, 1); Expr *temp2 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1); @@ -159041,26 +161218,26 @@ static YYACTIONTYPE yy_reduce( sqlite3RenameTokenMap(pParse, (void*)temp3, &yymsp[0].minor.yy0); sqlite3RenameTokenMap(pParse, (void*)temp2, &yymsp[-2].minor.yy0); } - yylhsminor.yy202 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4); + yylhsminor.yy602 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4); } - yymsp[-4].minor.yy202 = yylhsminor.yy202; + yymsp[-4].minor.yy602 = yylhsminor.yy602; break; - case 173: /* term ::= NULL|FLOAT|BLOB */ - case 174: /* term ::= STRING */ yytestcase(yyruleno==174); -{yymsp[0].minor.yy202=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/} + case 180: /* term ::= NULL|FLOAT|BLOB */ + case 181: /* term ::= STRING */ yytestcase(yyruleno==181); +{yymsp[0].minor.yy602=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; - case 175: /* term ::= INTEGER */ + case 182: /* term ::= INTEGER */ { - yylhsminor.yy202 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1); + yylhsminor.yy602 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1); } - yymsp[0].minor.yy202 = yylhsminor.yy202; + yymsp[0].minor.yy602 = yylhsminor.yy602; break; - case 176: /* expr ::= VARIABLE */ + case 183: /* expr ::= VARIABLE */ { if( !(yymsp[0].minor.yy0.z[0]=='#' && sqlite3Isdigit(yymsp[0].minor.yy0.z[1])) ){ u32 n = yymsp[0].minor.yy0.n; - yymsp[0].minor.yy202 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0); - sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy202, n); + yymsp[0].minor.yy602 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0); + sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy602, n); }else{ /* When doing a nested parse, one can include terms in an expression ** that look like this: #1 #2 ... 
These terms refer to registers @@ -159069,159 +161246,159 @@ static YYACTIONTYPE yy_reduce( assert( t.n>=2 ); if( pParse->nested==0 ){ sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &t); - yymsp[0].minor.yy202 = 0; + yymsp[0].minor.yy602 = 0; }else{ - yymsp[0].minor.yy202 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0); - if( yymsp[0].minor.yy202 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy202->iTable); + yymsp[0].minor.yy602 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0); + if( yymsp[0].minor.yy602 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy602->iTable); } } } break; - case 177: /* expr ::= expr COLLATE ID|STRING */ + case 184: /* expr ::= expr COLLATE ID|STRING */ { - yymsp[-2].minor.yy202 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy202, &yymsp[0].minor.yy0, 1); + yymsp[-2].minor.yy602 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy602, &yymsp[0].minor.yy0, 1); } break; - case 178: /* expr ::= CAST LP expr AS typetoken RP */ + case 185: /* expr ::= CAST LP expr AS typetoken RP */ { - yymsp[-5].minor.yy202 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); - sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy202, yymsp[-3].minor.yy202, 0); + yymsp[-5].minor.yy602 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); + sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy602, yymsp[-3].minor.yy602, 0); } break; - case 179: /* expr ::= ID|INDEXED LP distinct exprlist RP */ + case 186: /* expr ::= ID|INDEXED LP distinct exprlist RP */ { - yylhsminor.yy202 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy242, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy192); + yylhsminor.yy602 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy338, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy60); } - yymsp[-4].minor.yy202 = yylhsminor.yy202; + yymsp[-4].minor.yy602 = yylhsminor.yy602; break; - case 180: /* expr ::= ID|INDEXED LP STAR RP */ + case 187: /* expr ::= ID|INDEXED LP STAR RP */ { - yylhsminor.yy202 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0); + yylhsminor.yy602 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0); } - yymsp[-3].minor.yy202 = yylhsminor.yy202; + yymsp[-3].minor.yy602 = yylhsminor.yy602; break; - case 181: /* expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ + case 188: /* expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ { - yylhsminor.yy202 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy242, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy192); - sqlite3WindowAttach(pParse, yylhsminor.yy202, yymsp[0].minor.yy303); + yylhsminor.yy602 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy338, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy60); + sqlite3WindowAttach(pParse, yylhsminor.yy602, yymsp[0].minor.yy19); } - yymsp[-5].minor.yy202 = yylhsminor.yy202; + yymsp[-5].minor.yy602 = yylhsminor.yy602; break; - case 182: /* expr ::= ID|INDEXED LP STAR RP filter_over */ + case 189: /* expr ::= ID|INDEXED LP STAR RP filter_over */ { - yylhsminor.yy202 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0); - sqlite3WindowAttach(pParse, yylhsminor.yy202, yymsp[0].minor.yy303); + yylhsminor.yy602 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0); + sqlite3WindowAttach(pParse, yylhsminor.yy602, yymsp[0].minor.yy19); } - yymsp[-4].minor.yy202 = yylhsminor.yy202; + yymsp[-4].minor.yy602 = yylhsminor.yy602; break; - case 183: /* term ::= CTIME_KW */ + case 190: /* term ::= CTIME_KW */ { - yylhsminor.yy202 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0); + yylhsminor.yy602 = sqlite3ExprFunction(pParse, 0, 
&yymsp[0].minor.yy0, 0); } - yymsp[0].minor.yy202 = yylhsminor.yy202; + yymsp[0].minor.yy602 = yylhsminor.yy602; break; - case 184: /* expr ::= LP nexprlist COMMA expr RP */ + case 191: /* expr ::= LP nexprlist COMMA expr RP */ { - ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy242, yymsp[-1].minor.yy202); - yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); - if( yymsp[-4].minor.yy202 ){ - yymsp[-4].minor.yy202->x.pList = pList; + ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy338, yymsp[-1].minor.yy602); + yymsp[-4].minor.yy602 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); + if( yymsp[-4].minor.yy602 ){ + yymsp[-4].minor.yy602->x.pList = pList; if( ALWAYS(pList->nExpr) ){ - yymsp[-4].minor.yy202->flags |= pList->a[0].pExpr->flags & EP_Propagate; + yymsp[-4].minor.yy602->flags |= pList->a[0].pExpr->flags & EP_Propagate; } }else{ sqlite3ExprListDelete(pParse->db, pList); } } break; - case 185: /* expr ::= expr AND expr */ -{yymsp[-2].minor.yy202=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy202,yymsp[0].minor.yy202);} + case 192: /* expr ::= expr AND expr */ +{yymsp[-2].minor.yy602=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy602,yymsp[0].minor.yy602);} break; - case 186: /* expr ::= expr OR expr */ - case 187: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==187); - case 188: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==188); - case 189: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==189); - case 190: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==190); - case 191: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==191); - case 192: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==192); -{yymsp[-2].minor.yy202=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy202,yymsp[0].minor.yy202);} + case 193: /* expr ::= expr OR expr */ + case 194: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==194); + case 195: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==195); + case 196: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==196); + case 197: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==197); + case 198: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==198); + case 199: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==199); +{yymsp[-2].minor.yy602=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy602,yymsp[0].minor.yy602);} break; - case 193: /* likeop ::= NOT LIKE_KW|MATCH */ + case 200: /* likeop ::= NOT LIKE_KW|MATCH */ {yymsp[-1].minor.yy0=yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n|=0x80000000; /*yymsp[-1].minor.yy0-overwrite-yymsp[0].minor.yy0*/} break; - case 194: /* expr ::= expr likeop expr */ + case 201: /* expr ::= expr likeop expr */ { ExprList *pList; int bNot = yymsp[-1].minor.yy0.n & 0x80000000; yymsp[-1].minor.yy0.n &= 0x7fffffff; - pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy202); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy202); - yymsp[-2].minor.yy202 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); - if( bNot ) yymsp[-2].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-2].minor.yy202, 0); - if( yymsp[-2].minor.yy202 ) yymsp[-2].minor.yy202->flags |= EP_InfixFunc; + pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy602); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy602); + yymsp[-2].minor.yy602 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); + if( bNot ) yymsp[-2].minor.yy602 = sqlite3PExpr(pParse, TK_NOT, 
yymsp[-2].minor.yy602, 0); + if( yymsp[-2].minor.yy602 ) yymsp[-2].minor.yy602->flags |= EP_InfixFunc; } break; - case 195: /* expr ::= expr likeop expr ESCAPE expr */ + case 202: /* expr ::= expr likeop expr ESCAPE expr */ { ExprList *pList; int bNot = yymsp[-3].minor.yy0.n & 0x80000000; yymsp[-3].minor.yy0.n &= 0x7fffffff; - pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy202); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy202); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy202); - yymsp[-4].minor.yy202 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0, 0); - if( bNot ) yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy202, 0); - if( yymsp[-4].minor.yy202 ) yymsp[-4].minor.yy202->flags |= EP_InfixFunc; + pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy602); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy602); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy602); + yymsp[-4].minor.yy602 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0, 0); + if( bNot ) yymsp[-4].minor.yy602 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy602, 0); + if( yymsp[-4].minor.yy602 ) yymsp[-4].minor.yy602->flags |= EP_InfixFunc; } break; - case 196: /* expr ::= expr ISNULL|NOTNULL */ -{yymsp[-1].minor.yy202 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy202,0);} + case 203: /* expr ::= expr ISNULL|NOTNULL */ +{yymsp[-1].minor.yy602 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy602,0);} break; - case 197: /* expr ::= expr NOT NULL */ -{yymsp[-2].minor.yy202 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy202,0);} + case 204: /* expr ::= expr NOT NULL */ +{yymsp[-2].minor.yy602 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy602,0);} break; - case 198: /* expr ::= expr IS expr */ + case 205: /* expr ::= expr IS expr */ { - yymsp[-2].minor.yy202 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy202,yymsp[0].minor.yy202); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy202, yymsp[-2].minor.yy202, TK_ISNULL); + yymsp[-2].minor.yy602 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy602,yymsp[0].minor.yy602); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy602, yymsp[-2].minor.yy602, TK_ISNULL); } break; - case 199: /* expr ::= expr IS NOT expr */ + case 206: /* expr ::= expr IS NOT expr */ { - yymsp[-3].minor.yy202 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy202,yymsp[0].minor.yy202); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy202, yymsp[-3].minor.yy202, TK_NOTNULL); + yymsp[-3].minor.yy602 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy602,yymsp[0].minor.yy602); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy602, yymsp[-3].minor.yy602, TK_NOTNULL); } break; - case 200: /* expr ::= NOT expr */ - case 201: /* expr ::= BITNOT expr */ yytestcase(yyruleno==201); -{yymsp[-1].minor.yy202 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy202, 0);/*A-overwrites-B*/} + case 207: /* expr ::= NOT expr */ + case 208: /* expr ::= BITNOT expr */ yytestcase(yyruleno==208); +{yymsp[-1].minor.yy602 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy602, 0);/*A-overwrites-B*/} break; - case 202: /* expr ::= PLUS|MINUS expr */ + case 209: /* expr ::= PLUS|MINUS expr */ { - yymsp[-1].minor.yy202 = sqlite3PExpr(pParse, yymsp[-1].major==TK_PLUS ? TK_UPLUS : TK_UMINUS, yymsp[0].minor.yy202, 0); + yymsp[-1].minor.yy602 = sqlite3PExpr(pParse, yymsp[-1].major==TK_PLUS ? 
TK_UPLUS : TK_UMINUS, yymsp[0].minor.yy602, 0); /*A-overwrites-B*/ } break; - case 203: /* between_op ::= BETWEEN */ - case 206: /* in_op ::= IN */ yytestcase(yyruleno==206); -{yymsp[0].minor.yy192 = 0;} + case 210: /* between_op ::= BETWEEN */ + case 213: /* in_op ::= IN */ yytestcase(yyruleno==213); +{yymsp[0].minor.yy60 = 0;} break; - case 205: /* expr ::= expr between_op expr AND expr */ + case 212: /* expr ::= expr between_op expr AND expr */ { - ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy202); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy202); - yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy202, 0); - if( yymsp[-4].minor.yy202 ){ - yymsp[-4].minor.yy202->x.pList = pList; + ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy602); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy602); + yymsp[-4].minor.yy602 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy602, 0); + if( yymsp[-4].minor.yy602 ){ + yymsp[-4].minor.yy602->x.pList = pList; }else{ sqlite3ExprListDelete(pParse->db, pList); } - if( yymsp[-3].minor.yy192 ) yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy202, 0); + if( yymsp[-3].minor.yy60 ) yymsp[-4].minor.yy602 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy602, 0); } break; - case 208: /* expr ::= expr in_op LP exprlist RP */ + case 215: /* expr ::= expr in_op LP exprlist RP */ { - if( yymsp[-1].minor.yy242==0 ){ + if( yymsp[-1].minor.yy338==0 ){ /* Expressions of the form ** ** expr1 IN () @@ -159230,197 +161407,197 @@ static YYACTIONTYPE yy_reduce( ** simplify to constants 0 (false) and 1 (true), respectively, ** regardless of the value of expr1. */ - sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy202); - yymsp[-4].minor.yy202 = sqlite3Expr(pParse->db, TK_INTEGER, yymsp[-3].minor.yy192 ? "1" : "0"); - }else if( yymsp[-1].minor.yy242->nExpr==1 && sqlite3ExprIsConstant(yymsp[-1].minor.yy242->a[0].pExpr) ){ - Expr *pRHS = yymsp[-1].minor.yy242->a[0].pExpr; - yymsp[-1].minor.yy242->a[0].pExpr = 0; - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy242); + sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy602); + yymsp[-4].minor.yy602 = sqlite3Expr(pParse->db, TK_INTEGER, yymsp[-3].minor.yy60 ? 
"1" : "0"); + }else if( yymsp[-1].minor.yy338->nExpr==1 && sqlite3ExprIsConstant(yymsp[-1].minor.yy338->a[0].pExpr) ){ + Expr *pRHS = yymsp[-1].minor.yy338->a[0].pExpr; + yymsp[-1].minor.yy338->a[0].pExpr = 0; + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy338); pRHS = sqlite3PExpr(pParse, TK_UPLUS, pRHS, 0); - yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy202, pRHS); - if( yymsp[-3].minor.yy192 ) yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy202, 0); + yymsp[-4].minor.yy602 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy602, pRHS); + if( yymsp[-3].minor.yy60 ) yymsp[-4].minor.yy602 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy602, 0); }else{ - yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy202, 0); - if( yymsp[-4].minor.yy202 ){ - yymsp[-4].minor.yy202->x.pList = yymsp[-1].minor.yy242; - sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy202); + yymsp[-4].minor.yy602 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy602, 0); + if( yymsp[-4].minor.yy602 ){ + yymsp[-4].minor.yy602->x.pList = yymsp[-1].minor.yy338; + sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy602); }else{ - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy242); + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy338); } - if( yymsp[-3].minor.yy192 ) yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy202, 0); + if( yymsp[-3].minor.yy60 ) yymsp[-4].minor.yy602 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy602, 0); } } break; - case 209: /* expr ::= LP select RP */ + case 216: /* expr ::= LP select RP */ { - yymsp[-2].minor.yy202 = sqlite3PExpr(pParse, TK_SELECT, 0, 0); - sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy202, yymsp[-1].minor.yy539); + yymsp[-2].minor.yy602 = sqlite3PExpr(pParse, TK_SELECT, 0, 0); + sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy602, yymsp[-1].minor.yy307); } break; - case 210: /* expr ::= expr in_op LP select RP */ + case 217: /* expr ::= expr in_op LP select RP */ { - yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy202, 0); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy202, yymsp[-1].minor.yy539); - if( yymsp[-3].minor.yy192 ) yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy202, 0); + yymsp[-4].minor.yy602 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy602, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy602, yymsp[-1].minor.yy307); + if( yymsp[-3].minor.yy60 ) yymsp[-4].minor.yy602 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy602, 0); } break; - case 211: /* expr ::= expr in_op nm dbnm paren_exprlist */ + case 218: /* expr ::= expr in_op nm dbnm paren_exprlist */ { SrcList *pSrc = sqlite3SrcListAppend(pParse, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0); Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0); - if( yymsp[0].minor.yy242 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy242); - yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy202, 0); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy202, pSelect); - if( yymsp[-3].minor.yy192 ) yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy202, 0); + if( yymsp[0].minor.yy338 ) sqlite3SrcListFuncArgs(pParse, pSelect ? 
pSrc : 0, yymsp[0].minor.yy338); + yymsp[-4].minor.yy602 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy602, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy602, pSelect); + if( yymsp[-3].minor.yy60 ) yymsp[-4].minor.yy602 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy602, 0); } break; - case 212: /* expr ::= EXISTS LP select RP */ + case 219: /* expr ::= EXISTS LP select RP */ { Expr *p; - p = yymsp[-3].minor.yy202 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0); - sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy539); + p = yymsp[-3].minor.yy602 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0); + sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy307); } break; - case 213: /* expr ::= CASE case_operand case_exprlist case_else END */ + case 220: /* expr ::= CASE case_operand case_exprlist case_else END */ { - yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy202, 0); - if( yymsp[-4].minor.yy202 ){ - yymsp[-4].minor.yy202->x.pList = yymsp[-1].minor.yy202 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy242,yymsp[-1].minor.yy202) : yymsp[-2].minor.yy242; - sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy202); + yymsp[-4].minor.yy602 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy602, 0); + if( yymsp[-4].minor.yy602 ){ + yymsp[-4].minor.yy602->x.pList = yymsp[-1].minor.yy602 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy338,yymsp[-1].minor.yy602) : yymsp[-2].minor.yy338; + sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy602); }else{ - sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy242); - sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy202); + sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy338); + sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy602); } } break; - case 214: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ + case 221: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ { - yymsp[-4].minor.yy242 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy242, yymsp[-2].minor.yy202); - yymsp[-4].minor.yy242 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy242, yymsp[0].minor.yy202); + yymsp[-4].minor.yy338 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy338, yymsp[-2].minor.yy602); + yymsp[-4].minor.yy338 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy338, yymsp[0].minor.yy602); } break; - case 215: /* case_exprlist ::= WHEN expr THEN expr */ + case 222: /* case_exprlist ::= WHEN expr THEN expr */ { - yymsp[-3].minor.yy242 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy202); - yymsp[-3].minor.yy242 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy242, yymsp[0].minor.yy202); + yymsp[-3].minor.yy338 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy602); + yymsp[-3].minor.yy338 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy338, yymsp[0].minor.yy602); } break; - case 218: /* case_operand ::= expr */ -{yymsp[0].minor.yy202 = yymsp[0].minor.yy202; /*A-overwrites-X*/} + case 225: /* case_operand ::= expr */ +{yymsp[0].minor.yy602 = yymsp[0].minor.yy602; /*A-overwrites-X*/} break; - case 221: /* nexprlist ::= nexprlist COMMA expr */ -{yymsp[-2].minor.yy242 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy242,yymsp[0].minor.yy202);} + case 228: /* nexprlist ::= nexprlist COMMA expr */ +{yymsp[-2].minor.yy338 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy338,yymsp[0].minor.yy602);} break; - case 222: /* nexprlist ::= expr */ -{yymsp[0].minor.yy242 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy202); /*A-overwrites-Y*/} + case 229: /* nexprlist ::= expr */ +{yymsp[0].minor.yy338 = 
sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy602); /*A-overwrites-Y*/} break; - case 224: /* paren_exprlist ::= LP exprlist RP */ - case 229: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==229); -{yymsp[-2].minor.yy242 = yymsp[-1].minor.yy242;} + case 231: /* paren_exprlist ::= LP exprlist RP */ + case 236: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==236); +{yymsp[-2].minor.yy338 = yymsp[-1].minor.yy338;} break; - case 225: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + case 232: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ { sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, - sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy242, yymsp[-10].minor.yy192, - &yymsp[-11].minor.yy0, yymsp[0].minor.yy202, SQLITE_SO_ASC, yymsp[-8].minor.yy192, SQLITE_IDXTYPE_APPDEF); + sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy338, yymsp[-10].minor.yy60, + &yymsp[-11].minor.yy0, yymsp[0].minor.yy602, SQLITE_SO_ASC, yymsp[-8].minor.yy60, SQLITE_IDXTYPE_APPDEF); if( IN_RENAME_OBJECT && pParse->pNewIndex ){ sqlite3RenameTokenMap(pParse, pParse->pNewIndex->zName, &yymsp[-4].minor.yy0); } } break; - case 226: /* uniqueflag ::= UNIQUE */ - case 268: /* raisetype ::= ABORT */ yytestcase(yyruleno==268); -{yymsp[0].minor.yy192 = OE_Abort;} + case 233: /* uniqueflag ::= UNIQUE */ + case 275: /* raisetype ::= ABORT */ yytestcase(yyruleno==275); +{yymsp[0].minor.yy60 = OE_Abort;} break; - case 227: /* uniqueflag ::= */ -{yymsp[1].minor.yy192 = OE_None;} + case 234: /* uniqueflag ::= */ +{yymsp[1].minor.yy60 = OE_None;} break; - case 230: /* eidlist ::= eidlist COMMA nm collate sortorder */ + case 237: /* eidlist ::= eidlist COMMA nm collate sortorder */ { - yymsp[-4].minor.yy242 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy242, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy192, yymsp[0].minor.yy192); + yymsp[-4].minor.yy338 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy338, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy60, yymsp[0].minor.yy60); } break; - case 231: /* eidlist ::= nm collate sortorder */ + case 238: /* eidlist ::= nm collate sortorder */ { - yymsp[-2].minor.yy242 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy192, yymsp[0].minor.yy192); /*A-overwrites-Y*/ + yymsp[-2].minor.yy338 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy60, yymsp[0].minor.yy60); /*A-overwrites-Y*/ } break; - case 234: /* cmd ::= DROP INDEX ifexists fullname */ -{sqlite3DropIndex(pParse, yymsp[0].minor.yy47, yymsp[-1].minor.yy192);} + case 241: /* cmd ::= DROP INDEX ifexists fullname */ +{sqlite3DropIndex(pParse, yymsp[0].minor.yy291, yymsp[-1].minor.yy60);} break; - case 235: /* cmd ::= VACUUM vinto */ -{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy202);} + case 242: /* cmd ::= VACUUM vinto */ +{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy602);} break; - case 236: /* cmd ::= VACUUM nm vinto */ -{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy202);} + case 243: /* cmd ::= VACUUM nm vinto */ +{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy602);} break; - case 239: /* cmd ::= PRAGMA nm dbnm */ + case 246: /* cmd ::= PRAGMA nm dbnm */ {sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);} break; - case 240: /* cmd ::= PRAGMA nm dbnm EQ nmnum */ + case 247: /* cmd ::= PRAGMA nm dbnm EQ nmnum */ 
{sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,0);} break; - case 241: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */ + case 248: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */ {sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,0);} break; - case 242: /* cmd ::= PRAGMA nm dbnm EQ minus_num */ + case 249: /* cmd ::= PRAGMA nm dbnm EQ minus_num */ {sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,1);} break; - case 243: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */ + case 250: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */ {sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,1);} break; - case 246: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + case 253: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ { Token all; all.z = yymsp[-3].minor.yy0.z; all.n = (int)(yymsp[0].minor.yy0.z - yymsp[-3].minor.yy0.z) + yymsp[0].minor.yy0.n; - sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy447, &all); + sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy483, &all); } break; - case 247: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + case 254: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ { - sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy192, yymsp[-4].minor.yy230.a, yymsp[-4].minor.yy230.b, yymsp[-2].minor.yy47, yymsp[0].minor.yy202, yymsp[-10].minor.yy192, yymsp[-8].minor.yy192); + sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy60, yymsp[-4].minor.yy50.a, yymsp[-4].minor.yy50.b, yymsp[-2].minor.yy291, yymsp[0].minor.yy602, yymsp[-10].minor.yy60, yymsp[-8].minor.yy60); yymsp[-10].minor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); /*A-overwrites-T*/ } break; - case 248: /* trigger_time ::= BEFORE|AFTER */ -{ yymsp[0].minor.yy192 = yymsp[0].major; /*A-overwrites-X*/ } + case 255: /* trigger_time ::= BEFORE|AFTER */ +{ yymsp[0].minor.yy60 = yymsp[0].major; /*A-overwrites-X*/ } break; - case 249: /* trigger_time ::= INSTEAD OF */ -{ yymsp[-1].minor.yy192 = TK_INSTEAD;} + case 256: /* trigger_time ::= INSTEAD OF */ +{ yymsp[-1].minor.yy60 = TK_INSTEAD;} break; - case 250: /* trigger_time ::= */ -{ yymsp[1].minor.yy192 = TK_BEFORE; } + case 257: /* trigger_time ::= */ +{ yymsp[1].minor.yy60 = TK_BEFORE; } break; - case 251: /* trigger_event ::= DELETE|INSERT */ - case 252: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==252); -{yymsp[0].minor.yy230.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy230.b = 0;} + case 258: /* trigger_event ::= DELETE|INSERT */ + case 259: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==259); +{yymsp[0].minor.yy50.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy50.b = 0;} break; - case 253: /* trigger_event ::= UPDATE OF idlist */ -{yymsp[-2].minor.yy230.a = TK_UPDATE; yymsp[-2].minor.yy230.b = yymsp[0].minor.yy600;} + case 260: /* trigger_event ::= UPDATE OF idlist */ +{yymsp[-2].minor.yy50.a = TK_UPDATE; yymsp[-2].minor.yy50.b = yymsp[0].minor.yy288;} break; - case 254: /* when_clause ::= */ - case 273: /* key_opt ::= */ yytestcase(yyruleno==273); -{ yymsp[1].minor.yy202 = 0; } + case 261: /* when_clause ::= */ + case 280: /* key_opt ::= */ yytestcase(yyruleno==280); +{ yymsp[1].minor.yy602 = 0; } break; - case 255: /* when_clause ::= WHEN expr */ 
- case 274: /* key_opt ::= KEY expr */ yytestcase(yyruleno==274); -{ yymsp[-1].minor.yy202 = yymsp[0].minor.yy202; } + case 262: /* when_clause ::= WHEN expr */ + case 281: /* key_opt ::= KEY expr */ yytestcase(yyruleno==281); +{ yymsp[-1].minor.yy602 = yymsp[0].minor.yy602; } break; - case 256: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ + case 263: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ { - assert( yymsp[-2].minor.yy447!=0 ); - yymsp[-2].minor.yy447->pLast->pNext = yymsp[-1].minor.yy447; - yymsp[-2].minor.yy447->pLast = yymsp[-1].minor.yy447; + assert( yymsp[-2].minor.yy483!=0 ); + yymsp[-2].minor.yy483->pLast->pNext = yymsp[-1].minor.yy483; + yymsp[-2].minor.yy483->pLast = yymsp[-1].minor.yy483; } break; - case 257: /* trigger_cmd_list ::= trigger_cmd SEMI */ + case 264: /* trigger_cmd_list ::= trigger_cmd SEMI */ { - assert( yymsp[-1].minor.yy447!=0 ); - yymsp[-1].minor.yy447->pLast = yymsp[-1].minor.yy447; + assert( yymsp[-1].minor.yy483!=0 ); + yymsp[-1].minor.yy483->pLast = yymsp[-1].minor.yy483; } break; - case 258: /* trnm ::= nm DOT nm */ + case 265: /* trnm ::= nm DOT nm */ { yymsp[-2].minor.yy0 = yymsp[0].minor.yy0; sqlite3ErrorMsg(pParse, @@ -159428,344 +161605,364 @@ static YYACTIONTYPE yy_reduce( "statements within triggers"); } break; - case 259: /* tridxby ::= INDEXED BY nm */ + case 266: /* tridxby ::= INDEXED BY nm */ { sqlite3ErrorMsg(pParse, "the INDEXED BY clause is not allowed on UPDATE or DELETE statements " "within triggers"); } break; - case 260: /* tridxby ::= NOT INDEXED */ + case 267: /* tridxby ::= NOT INDEXED */ { sqlite3ErrorMsg(pParse, "the NOT INDEXED clause is not allowed on UPDATE or DELETE statements " "within triggers"); } break; - case 261: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ -{yylhsminor.yy447 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy47, yymsp[-3].minor.yy242, yymsp[-1].minor.yy202, yymsp[-7].minor.yy192, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy436);} - yymsp[-8].minor.yy447 = yylhsminor.yy447; + case 268: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ +{yylhsminor.yy483 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy291, yymsp[-3].minor.yy338, yymsp[-1].minor.yy602, yymsp[-7].minor.yy60, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy528);} + yymsp[-8].minor.yy483 = yylhsminor.yy483; break; - case 262: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ + case 269: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ { - yylhsminor.yy447 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy600,yymsp[-2].minor.yy539,yymsp[-6].minor.yy192,yymsp[-1].minor.yy318,yymsp[-7].minor.yy436,yymsp[0].minor.yy436);/*yylhsminor.yy447-overwrites-yymsp[-6].minor.yy192*/ + yylhsminor.yy483 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy288,yymsp[-2].minor.yy307,yymsp[-6].minor.yy60,yymsp[-1].minor.yy178,yymsp[-7].minor.yy528,yymsp[0].minor.yy528);/*yylhsminor.yy483-overwrites-yymsp[-6].minor.yy60*/ } - yymsp[-7].minor.yy447 = yylhsminor.yy447; + yymsp[-7].minor.yy483 = yylhsminor.yy483; break; - case 263: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ -{yylhsminor.yy447 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy202, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy436);} - yymsp[-5].minor.yy447 = yylhsminor.yy447; + case 270: /* trigger_cmd ::= 
DELETE FROM trnm tridxby where_opt scanpt */ +{yylhsminor.yy483 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy602, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy528);} + yymsp[-5].minor.yy483 = yylhsminor.yy483; break; - case 264: /* trigger_cmd ::= scanpt select scanpt */ -{yylhsminor.yy447 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy539, yymsp[-2].minor.yy436, yymsp[0].minor.yy436); /*yylhsminor.yy447-overwrites-yymsp[-1].minor.yy539*/} - yymsp[-2].minor.yy447 = yylhsminor.yy447; + case 271: /* trigger_cmd ::= scanpt select scanpt */ +{yylhsminor.yy483 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy307, yymsp[-2].minor.yy528, yymsp[0].minor.yy528); /*yylhsminor.yy483-overwrites-yymsp[-1].minor.yy307*/} + yymsp[-2].minor.yy483 = yylhsminor.yy483; break; - case 265: /* expr ::= RAISE LP IGNORE RP */ + case 272: /* expr ::= RAISE LP IGNORE RP */ { - yymsp[-3].minor.yy202 = sqlite3PExpr(pParse, TK_RAISE, 0, 0); - if( yymsp[-3].minor.yy202 ){ - yymsp[-3].minor.yy202->affExpr = OE_Ignore; + yymsp[-3].minor.yy602 = sqlite3PExpr(pParse, TK_RAISE, 0, 0); + if( yymsp[-3].minor.yy602 ){ + yymsp[-3].minor.yy602->affExpr = OE_Ignore; } } break; - case 266: /* expr ::= RAISE LP raisetype COMMA nm RP */ + case 273: /* expr ::= RAISE LP raisetype COMMA nm RP */ { - yymsp[-5].minor.yy202 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1); - if( yymsp[-5].minor.yy202 ) { - yymsp[-5].minor.yy202->affExpr = (char)yymsp[-3].minor.yy192; + yymsp[-5].minor.yy602 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1); + if( yymsp[-5].minor.yy602 ) { + yymsp[-5].minor.yy602->affExpr = (char)yymsp[-3].minor.yy60; } } break; - case 267: /* raisetype ::= ROLLBACK */ -{yymsp[0].minor.yy192 = OE_Rollback;} + case 274: /* raisetype ::= ROLLBACK */ +{yymsp[0].minor.yy60 = OE_Rollback;} break; - case 269: /* raisetype ::= FAIL */ -{yymsp[0].minor.yy192 = OE_Fail;} + case 276: /* raisetype ::= FAIL */ +{yymsp[0].minor.yy60 = OE_Fail;} break; - case 270: /* cmd ::= DROP TRIGGER ifexists fullname */ + case 277: /* cmd ::= DROP TRIGGER ifexists fullname */ { - sqlite3DropTrigger(pParse,yymsp[0].minor.yy47,yymsp[-1].minor.yy192); + sqlite3DropTrigger(pParse,yymsp[0].minor.yy291,yymsp[-1].minor.yy60); } break; - case 271: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + case 278: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ { - sqlite3Attach(pParse, yymsp[-3].minor.yy202, yymsp[-1].minor.yy202, yymsp[0].minor.yy202); + sqlite3Attach(pParse, yymsp[-3].minor.yy602, yymsp[-1].minor.yy602, yymsp[0].minor.yy602); } break; - case 272: /* cmd ::= DETACH database_kw_opt expr */ + case 279: /* cmd ::= DETACH database_kw_opt expr */ { - sqlite3Detach(pParse, yymsp[0].minor.yy202); + sqlite3Detach(pParse, yymsp[0].minor.yy602); } break; - case 275: /* cmd ::= REINDEX */ + case 282: /* cmd ::= REINDEX */ {sqlite3Reindex(pParse, 0, 0);} break; - case 276: /* cmd ::= REINDEX nm dbnm */ + case 283: /* cmd ::= REINDEX nm dbnm */ {sqlite3Reindex(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);} break; - case 277: /* cmd ::= ANALYZE */ + case 284: /* cmd ::= ANALYZE */ {sqlite3Analyze(pParse, 0, 0);} break; - case 278: /* cmd ::= ANALYZE nm dbnm */ + case 285: /* cmd ::= ANALYZE nm dbnm */ {sqlite3Analyze(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);} break; - case 279: /* cmd ::= ALTER TABLE fullname RENAME TO nm */ + case 286: /* cmd ::= ALTER TABLE fullname RENAME TO nm */ { - 
sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy47,&yymsp[0].minor.yy0); + sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy291,&yymsp[0].minor.yy0); } break; - case 280: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + case 287: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ { yymsp[-1].minor.yy0.n = (int)(pParse->sLastToken.z-yymsp[-1].minor.yy0.z) + pParse->sLastToken.n; sqlite3AlterFinishAddColumn(pParse, &yymsp[-1].minor.yy0); } break; - case 281: /* add_column_fullname ::= fullname */ + case 288: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ +{ + sqlite3AlterDropColumn(pParse, yymsp[-3].minor.yy291, &yymsp[0].minor.yy0); +} + break; + case 289: /* add_column_fullname ::= fullname */ { disableLookaside(pParse); - sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy47); + sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy291); } break; - case 282: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + case 290: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ { - sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy47, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); + sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy291, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } break; - case 283: /* cmd ::= create_vtab */ + case 291: /* cmd ::= create_vtab */ {sqlite3VtabFinishParse(pParse,0);} break; - case 284: /* cmd ::= create_vtab LP vtabarglist RP */ + case 292: /* cmd ::= create_vtab LP vtabarglist RP */ {sqlite3VtabFinishParse(pParse,&yymsp[0].minor.yy0);} break; - case 285: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + case 293: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ { - sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy192); + sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy60); } break; - case 286: /* vtabarg ::= */ + case 294: /* vtabarg ::= */ {sqlite3VtabArgInit(pParse);} break; - case 287: /* vtabargtoken ::= ANY */ - case 288: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==288); - case 289: /* lp ::= LP */ yytestcase(yyruleno==289); + case 295: /* vtabargtoken ::= ANY */ + case 296: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==296); + case 297: /* lp ::= LP */ yytestcase(yyruleno==297); {sqlite3VtabArgExtend(pParse,&yymsp[0].minor.yy0);} break; - case 290: /* with ::= WITH wqlist */ - case 291: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==291); -{ sqlite3WithPush(pParse, yymsp[0].minor.yy131, 1); } + case 298: /* with ::= WITH wqlist */ + case 299: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==299); +{ sqlite3WithPush(pParse, yymsp[0].minor.yy195, 1); } break; - case 292: /* wqlist ::= nm eidlist_opt AS LP select RP */ + case 300: /* wqas ::= AS */ +{yymsp[0].minor.yy570 = M10d_Any;} + break; + case 301: /* wqas ::= AS MATERIALIZED */ +{yymsp[-1].minor.yy570 = M10d_Yes;} + break; + case 302: /* wqas ::= AS NOT MATERIALIZED */ +{yymsp[-2].minor.yy570 = M10d_No;} + break; + case 303: /* wqitem ::= nm eidlist_opt wqas LP select RP */ { - yymsp[-5].minor.yy131 = sqlite3WithAdd(pParse, 0, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy242, yymsp[-1].minor.yy539); /*A-overwrites-X*/ + yymsp[-5].minor.yy607 = sqlite3CteNew(pParse, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy338, yymsp[-1].minor.yy307, yymsp[-3].minor.yy570); /*A-overwrites-X*/ } break; - 
case 293: /* wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP */ + case 304: /* wqlist ::= wqitem */ { - yymsp[-7].minor.yy131 = sqlite3WithAdd(pParse, yymsp[-7].minor.yy131, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy242, yymsp[-1].minor.yy539); + yymsp[0].minor.yy195 = sqlite3WithAdd(pParse, 0, yymsp[0].minor.yy607); /*A-overwrites-X*/ } break; - case 294: /* windowdefn_list ::= windowdefn */ -{ yylhsminor.yy303 = yymsp[0].minor.yy303; } - yymsp[0].minor.yy303 = yylhsminor.yy303; - break; - case 295: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */ + case 305: /* wqlist ::= wqlist COMMA wqitem */ { - assert( yymsp[0].minor.yy303!=0 ); - sqlite3WindowChain(pParse, yymsp[0].minor.yy303, yymsp[-2].minor.yy303); - yymsp[0].minor.yy303->pNextWin = yymsp[-2].minor.yy303; - yylhsminor.yy303 = yymsp[0].minor.yy303; + yymsp[-2].minor.yy195 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy195, yymsp[0].minor.yy607); } - yymsp[-2].minor.yy303 = yylhsminor.yy303; break; - case 296: /* windowdefn ::= nm AS LP window RP */ + case 306: /* windowdefn_list ::= windowdefn */ +{ yylhsminor.yy19 = yymsp[0].minor.yy19; } + yymsp[0].minor.yy19 = yylhsminor.yy19; + break; + case 307: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */ { - if( ALWAYS(yymsp[-1].minor.yy303) ){ - yymsp[-1].minor.yy303->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n); + assert( yymsp[0].minor.yy19!=0 ); + sqlite3WindowChain(pParse, yymsp[0].minor.yy19, yymsp[-2].minor.yy19); + yymsp[0].minor.yy19->pNextWin = yymsp[-2].minor.yy19; + yylhsminor.yy19 = yymsp[0].minor.yy19; +} + yymsp[-2].minor.yy19 = yylhsminor.yy19; + break; + case 308: /* windowdefn ::= nm AS LP window RP */ +{ + if( ALWAYS(yymsp[-1].minor.yy19) ){ + yymsp[-1].minor.yy19->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n); } - yylhsminor.yy303 = yymsp[-1].minor.yy303; + yylhsminor.yy19 = yymsp[-1].minor.yy19; } - yymsp[-4].minor.yy303 = yylhsminor.yy303; + yymsp[-4].minor.yy19 = yylhsminor.yy19; break; - case 297: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + case 309: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */ { - yymsp[-4].minor.yy303 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy303, yymsp[-2].minor.yy242, yymsp[-1].minor.yy242, 0); + yymsp[-4].minor.yy19 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy19, yymsp[-2].minor.yy338, yymsp[-1].minor.yy338, 0); } break; - case 298: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + case 310: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ { - yylhsminor.yy303 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy303, yymsp[-2].minor.yy242, yymsp[-1].minor.yy242, &yymsp[-5].minor.yy0); + yylhsminor.yy19 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy19, yymsp[-2].minor.yy338, yymsp[-1].minor.yy338, &yymsp[-5].minor.yy0); } - yymsp[-5].minor.yy303 = yylhsminor.yy303; + yymsp[-5].minor.yy19 = yylhsminor.yy19; break; - case 299: /* window ::= ORDER BY sortlist frame_opt */ + case 311: /* window ::= ORDER BY sortlist frame_opt */ { - yymsp[-3].minor.yy303 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy303, 0, yymsp[-1].minor.yy242, 0); + yymsp[-3].minor.yy19 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy19, 0, yymsp[-1].minor.yy338, 0); } break; - case 300: /* window ::= nm ORDER BY sortlist frame_opt */ + case 312: /* window ::= nm ORDER BY sortlist frame_opt */ { - yylhsminor.yy303 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy303, 0, yymsp[-1].minor.yy242, 
&yymsp[-4].minor.yy0); + yylhsminor.yy19 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy19, 0, yymsp[-1].minor.yy338, &yymsp[-4].minor.yy0); } - yymsp[-4].minor.yy303 = yylhsminor.yy303; + yymsp[-4].minor.yy19 = yylhsminor.yy19; break; - case 301: /* window ::= frame_opt */ - case 320: /* filter_over ::= over_clause */ yytestcase(yyruleno==320); + case 313: /* window ::= frame_opt */ + case 332: /* filter_over ::= over_clause */ yytestcase(yyruleno==332); { - yylhsminor.yy303 = yymsp[0].minor.yy303; + yylhsminor.yy19 = yymsp[0].minor.yy19; } - yymsp[0].minor.yy303 = yylhsminor.yy303; + yymsp[0].minor.yy19 = yylhsminor.yy19; break; - case 302: /* window ::= nm frame_opt */ + case 314: /* window ::= nm frame_opt */ { - yylhsminor.yy303 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy303, 0, 0, &yymsp[-1].minor.yy0); + yylhsminor.yy19 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy19, 0, 0, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy303 = yylhsminor.yy303; + yymsp[-1].minor.yy19 = yylhsminor.yy19; break; - case 303: /* frame_opt ::= */ + case 315: /* frame_opt ::= */ { - yymsp[1].minor.yy303 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0); + yymsp[1].minor.yy19 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0); } break; - case 304: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + case 316: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ { - yylhsminor.yy303 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy192, yymsp[-1].minor.yy77.eType, yymsp[-1].minor.yy77.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy58); + yylhsminor.yy19 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy60, yymsp[-1].minor.yy113.eType, yymsp[-1].minor.yy113.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy570); } - yymsp[-2].minor.yy303 = yylhsminor.yy303; + yymsp[-2].minor.yy19 = yylhsminor.yy19; break; - case 305: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + case 317: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ { - yylhsminor.yy303 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy192, yymsp[-3].minor.yy77.eType, yymsp[-3].minor.yy77.pExpr, yymsp[-1].minor.yy77.eType, yymsp[-1].minor.yy77.pExpr, yymsp[0].minor.yy58); + yylhsminor.yy19 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy60, yymsp[-3].minor.yy113.eType, yymsp[-3].minor.yy113.pExpr, yymsp[-1].minor.yy113.eType, yymsp[-1].minor.yy113.pExpr, yymsp[0].minor.yy570); } - yymsp[-5].minor.yy303 = yylhsminor.yy303; + yymsp[-5].minor.yy19 = yylhsminor.yy19; break; - case 307: /* frame_bound_s ::= frame_bound */ - case 309: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==309); -{yylhsminor.yy77 = yymsp[0].minor.yy77;} - yymsp[0].minor.yy77 = yylhsminor.yy77; + case 319: /* frame_bound_s ::= frame_bound */ + case 321: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==321); +{yylhsminor.yy113 = yymsp[0].minor.yy113;} + yymsp[0].minor.yy113 = yylhsminor.yy113; break; - case 308: /* frame_bound_s ::= UNBOUNDED PRECEDING */ - case 310: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==310); - case 312: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==312); -{yylhsminor.yy77.eType = yymsp[-1].major; yylhsminor.yy77.pExpr = 0;} - yymsp[-1].minor.yy77 = yylhsminor.yy77; + case 320: /* frame_bound_s ::= UNBOUNDED PRECEDING */ + case 322: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==322); + case 324: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==324); 
+{yylhsminor.yy113.eType = yymsp[-1].major; yylhsminor.yy113.pExpr = 0;} + yymsp[-1].minor.yy113 = yylhsminor.yy113; break; - case 311: /* frame_bound ::= expr PRECEDING|FOLLOWING */ -{yylhsminor.yy77.eType = yymsp[0].major; yylhsminor.yy77.pExpr = yymsp[-1].minor.yy202;} - yymsp[-1].minor.yy77 = yylhsminor.yy77; + case 323: /* frame_bound ::= expr PRECEDING|FOLLOWING */ +{yylhsminor.yy113.eType = yymsp[0].major; yylhsminor.yy113.pExpr = yymsp[-1].minor.yy602;} + yymsp[-1].minor.yy113 = yylhsminor.yy113; break; - case 313: /* frame_exclude_opt ::= */ -{yymsp[1].minor.yy58 = 0;} + case 325: /* frame_exclude_opt ::= */ +{yymsp[1].minor.yy570 = 0;} break; - case 314: /* frame_exclude_opt ::= EXCLUDE frame_exclude */ -{yymsp[-1].minor.yy58 = yymsp[0].minor.yy58;} + case 326: /* frame_exclude_opt ::= EXCLUDE frame_exclude */ +{yymsp[-1].minor.yy570 = yymsp[0].minor.yy570;} break; - case 315: /* frame_exclude ::= NO OTHERS */ - case 316: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==316); -{yymsp[-1].minor.yy58 = yymsp[-1].major; /*A-overwrites-X*/} + case 327: /* frame_exclude ::= NO OTHERS */ + case 328: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==328); +{yymsp[-1].minor.yy570 = yymsp[-1].major; /*A-overwrites-X*/} break; - case 317: /* frame_exclude ::= GROUP|TIES */ -{yymsp[0].minor.yy58 = yymsp[0].major; /*A-overwrites-X*/} + case 329: /* frame_exclude ::= GROUP|TIES */ +{yymsp[0].minor.yy570 = yymsp[0].major; /*A-overwrites-X*/} break; - case 318: /* window_clause ::= WINDOW windowdefn_list */ -{ yymsp[-1].minor.yy303 = yymsp[0].minor.yy303; } + case 330: /* window_clause ::= WINDOW windowdefn_list */ +{ yymsp[-1].minor.yy19 = yymsp[0].minor.yy19; } break; - case 319: /* filter_over ::= filter_clause over_clause */ + case 331: /* filter_over ::= filter_clause over_clause */ { - yymsp[0].minor.yy303->pFilter = yymsp[-1].minor.yy202; - yylhsminor.yy303 = yymsp[0].minor.yy303; + yymsp[0].minor.yy19->pFilter = yymsp[-1].minor.yy602; + yylhsminor.yy19 = yymsp[0].minor.yy19; } - yymsp[-1].minor.yy303 = yylhsminor.yy303; + yymsp[-1].minor.yy19 = yylhsminor.yy19; break; - case 321: /* filter_over ::= filter_clause */ + case 333: /* filter_over ::= filter_clause */ { - yylhsminor.yy303 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); - if( yylhsminor.yy303 ){ - yylhsminor.yy303->eFrmType = TK_FILTER; - yylhsminor.yy303->pFilter = yymsp[0].minor.yy202; + yylhsminor.yy19 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); + if( yylhsminor.yy19 ){ + yylhsminor.yy19->eFrmType = TK_FILTER; + yylhsminor.yy19->pFilter = yymsp[0].minor.yy602; }else{ - sqlite3ExprDelete(pParse->db, yymsp[0].minor.yy202); + sqlite3ExprDelete(pParse->db, yymsp[0].minor.yy602); } } - yymsp[0].minor.yy303 = yylhsminor.yy303; + yymsp[0].minor.yy19 = yylhsminor.yy19; break; - case 322: /* over_clause ::= OVER LP window RP */ + case 334: /* over_clause ::= OVER LP window RP */ { - yymsp[-3].minor.yy303 = yymsp[-1].minor.yy303; - assert( yymsp[-3].minor.yy303!=0 ); + yymsp[-3].minor.yy19 = yymsp[-1].minor.yy19; + assert( yymsp[-3].minor.yy19!=0 ); } break; - case 323: /* over_clause ::= OVER nm */ + case 335: /* over_clause ::= OVER nm */ { - yymsp[-1].minor.yy303 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); - if( yymsp[-1].minor.yy303 ){ - yymsp[-1].minor.yy303->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n); + yymsp[-1].minor.yy19 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); + if( yymsp[-1].minor.yy19 ){ + 
yymsp[-1].minor.yy19->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n); } } break; - case 324: /* filter_clause ::= FILTER LP WHERE expr RP */ -{ yymsp[-4].minor.yy202 = yymsp[-1].minor.yy202; } + case 336: /* filter_clause ::= FILTER LP WHERE expr RP */ +{ yymsp[-4].minor.yy602 = yymsp[-1].minor.yy602; } break; default: - /* (325) input ::= cmdlist */ yytestcase(yyruleno==325); - /* (326) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==326); - /* (327) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=327); - /* (328) ecmd ::= SEMI */ yytestcase(yyruleno==328); - /* (329) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==329); - /* (330) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=330); - /* (331) trans_opt ::= */ yytestcase(yyruleno==331); - /* (332) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==332); - /* (333) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==333); - /* (334) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==334); - /* (335) savepoint_opt ::= */ yytestcase(yyruleno==335); - /* (336) cmd ::= create_table create_table_args */ yytestcase(yyruleno==336); - /* (337) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==337); - /* (338) columnlist ::= columnname carglist */ yytestcase(yyruleno==338); - /* (339) nm ::= ID|INDEXED */ yytestcase(yyruleno==339); - /* (340) nm ::= STRING */ yytestcase(yyruleno==340); - /* (341) nm ::= JOIN_KW */ yytestcase(yyruleno==341); - /* (342) typetoken ::= typename */ yytestcase(yyruleno==342); - /* (343) typename ::= ID|STRING */ yytestcase(yyruleno==343); - /* (344) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=344); - /* (345) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=345); - /* (346) carglist ::= carglist ccons */ yytestcase(yyruleno==346); - /* (347) carglist ::= */ yytestcase(yyruleno==347); - /* (348) ccons ::= NULL onconf */ yytestcase(yyruleno==348); - /* (349) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==349); - /* (350) ccons ::= AS generated */ yytestcase(yyruleno==350); - /* (351) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==351); - /* (352) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==352); - /* (353) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=353); - /* (354) tconscomma ::= */ yytestcase(yyruleno==354); - /* (355) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=355); - /* (356) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=356); - /* (357) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=357); - /* (358) oneselect ::= values */ yytestcase(yyruleno==358); - /* (359) sclp ::= selcollist COMMA */ yytestcase(yyruleno==359); - /* (360) as ::= ID|STRING */ yytestcase(yyruleno==360); - /* (361) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=361); - /* (362) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==362); - /* (363) exprlist ::= nexprlist */ yytestcase(yyruleno==363); - /* (364) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=364); - /* (365) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=365); - /* (366) nmnum ::= ON */ yytestcase(yyruleno==366); - /* (367) nmnum ::= DELETE */ yytestcase(yyruleno==367); - /* (368) nmnum ::= DEFAULT */ yytestcase(yyruleno==368); - /* (369) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==369); - /* (370) foreach_clause ::= */ yytestcase(yyruleno==370); - /* (371) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==371); - /* (372) trnm ::= nm */ 
yytestcase(yyruleno==372); - /* (373) tridxby ::= */ yytestcase(yyruleno==373); - /* (374) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==374); - /* (375) database_kw_opt ::= */ yytestcase(yyruleno==375); - /* (376) kwcolumn_opt ::= */ yytestcase(yyruleno==376); - /* (377) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==377); - /* (378) vtabarglist ::= vtabarg */ yytestcase(yyruleno==378); - /* (379) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==379); - /* (380) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==380); - /* (381) anylist ::= */ yytestcase(yyruleno==381); - /* (382) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==382); - /* (383) anylist ::= anylist ANY */ yytestcase(yyruleno==383); - /* (384) with ::= */ yytestcase(yyruleno==384); + /* (337) input ::= cmdlist */ yytestcase(yyruleno==337); + /* (338) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==338); + /* (339) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=339); + /* (340) ecmd ::= SEMI */ yytestcase(yyruleno==340); + /* (341) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==341); + /* (342) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=342); + /* (343) trans_opt ::= */ yytestcase(yyruleno==343); + /* (344) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==344); + /* (345) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==345); + /* (346) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==346); + /* (347) savepoint_opt ::= */ yytestcase(yyruleno==347); + /* (348) cmd ::= create_table create_table_args */ yytestcase(yyruleno==348); + /* (349) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==349); + /* (350) columnlist ::= columnname carglist */ yytestcase(yyruleno==350); + /* (351) nm ::= ID|INDEXED */ yytestcase(yyruleno==351); + /* (352) nm ::= STRING */ yytestcase(yyruleno==352); + /* (353) nm ::= JOIN_KW */ yytestcase(yyruleno==353); + /* (354) typetoken ::= typename */ yytestcase(yyruleno==354); + /* (355) typename ::= ID|STRING */ yytestcase(yyruleno==355); + /* (356) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=356); + /* (357) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=357); + /* (358) carglist ::= carglist ccons */ yytestcase(yyruleno==358); + /* (359) carglist ::= */ yytestcase(yyruleno==359); + /* (360) ccons ::= NULL onconf */ yytestcase(yyruleno==360); + /* (361) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==361); + /* (362) ccons ::= AS generated */ yytestcase(yyruleno==362); + /* (363) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==363); + /* (364) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==364); + /* (365) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=365); + /* (366) tconscomma ::= */ yytestcase(yyruleno==366); + /* (367) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=367); + /* (368) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=368); + /* (369) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=369); + /* (370) oneselect ::= values */ yytestcase(yyruleno==370); + /* (371) sclp ::= selcollist COMMA */ yytestcase(yyruleno==371); + /* (372) as ::= ID|STRING */ yytestcase(yyruleno==372); + /* (373) returning ::= */ yytestcase(yyruleno==373); + /* (374) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=374); + /* (375) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==375); + /* (376) exprlist ::= nexprlist */ yytestcase(yyruleno==376); + /* (377) nmnum ::= plus_num (OPTIMIZED OUT) 
*/ assert(yyruleno!=377); + /* (378) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=378); + /* (379) nmnum ::= ON */ yytestcase(yyruleno==379); + /* (380) nmnum ::= DELETE */ yytestcase(yyruleno==380); + /* (381) nmnum ::= DEFAULT */ yytestcase(yyruleno==381); + /* (382) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==382); + /* (383) foreach_clause ::= */ yytestcase(yyruleno==383); + /* (384) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==384); + /* (385) trnm ::= nm */ yytestcase(yyruleno==385); + /* (386) tridxby ::= */ yytestcase(yyruleno==386); + /* (387) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==387); + /* (388) database_kw_opt ::= */ yytestcase(yyruleno==388); + /* (389) kwcolumn_opt ::= */ yytestcase(yyruleno==389); + /* (390) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==390); + /* (391) vtabarglist ::= vtabarg */ yytestcase(yyruleno==391); + /* (392) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==392); + /* (393) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==393); + /* (394) anylist ::= */ yytestcase(yyruleno==394); + /* (395) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==395); + /* (396) anylist ::= anylist ANY */ yytestcase(yyruleno==396); + /* (397) with ::= */ yytestcase(yyruleno==397); break; /********** End reduce actions ************************************************/ }; @@ -159917,12 +162114,56 @@ SQLITE_PRIVATE void sqlite3Parser( } #endif - do{ + while(1){ /* Exit by "break" */ + assert( yypParser->yytos>=yypParser->yystack ); assert( yyact==yypParser->yytos->stateno ); yyact = yy_find_shift_action((YYCODETYPE)yymajor,yyact); if( yyact >= YY_MIN_REDUCE ){ - yyact = yy_reduce(yypParser,yyact-YY_MIN_REDUCE,yymajor, - yyminor sqlite3ParserCTX_PARAM); + unsigned int yyruleno = yyact - YY_MIN_REDUCE; /* Reduce by this rule */ + assert( yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ); +#ifndef NDEBUG + if( yyTraceFILE ){ + int yysize = yyRuleInfoNRhs[yyruleno]; + if( yysize ){ + fprintf(yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n", + yyTracePrompt, + yyruleno, yyRuleName[yyruleno], + yyruleno<YYNRULE_WITH_ACTION ? "" : " without external action", + yypParser->yytos[yysize].stateno); + }else{ + fprintf(yyTraceFILE, "%sReduce %d [%s]%s.\n", + yyTracePrompt, yyruleno, yyRuleName[yyruleno], + yyruleno<YYNRULE_WITH_ACTION ? "" : " without external action"); + } + } +#endif /* NDEBUG */ + + /* Check that the stack is large enough to grow by a single entry + ** if the RHS of the rule is empty. 
This ensures that there is room + ** enough on the stack to push the LHS value */ + if( yyRuleInfoNRhs[yyruleno]==0 ){ +#ifdef YYTRACKMAXSTACKDEPTH + if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){ + yypParser->yyhwm++; + assert( yypParser->yyhwm == + (int)(yypParser->yytos - yypParser->yystack)); + } +#endif +#if YYSTACKDEPTH>0 + if( yypParser->yytos>=yypParser->yystackEnd ){ + yyStackOverflow(yypParser); + break; + } +#else + if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){ + if( yyGrowStack(yypParser) ){ + yyStackOverflow(yypParser); + break; + } + } +#endif + } + yyact = yy_reduce(yypParser,yyruleno,yymajor,yyminor sqlite3ParserCTX_PARAM); }else if( yyact <= YY_MAX_SHIFTREDUCE ){ yy_shift(yypParser,yyact,(YYCODETYPE)yymajor,yyminor); #ifndef YYNOERRORRECOVERY @@ -160035,7 +162276,7 @@ SQLITE_PRIVATE void sqlite3Parser( break; #endif } - }while( yypParser->yytos>yypParser->yystack ); + } #ifndef NDEBUG if( yyTraceFILE ){ yyStackEntry *i; @@ -160096,8 +162337,8 @@ SQLITE_PRIVATE int sqlite3ParserFallback(int iToken){ ** all of them need to be used within the switch. */ #define CC_X 0 /* The letter 'x', or start of BLOB literal */ -#define CC_KYWD 1 /* Alphabetics or '_'. Usable in a keyword */ -#define CC_ID 2 /* unicode characters usable in IDs */ +#define CC_KYWD0 1 /* First letter of a keyword */ +#define CC_KYWD 2 /* Alphabetics or '_'. Usable in a keyword */ #define CC_DIGIT 3 /* Digits */ #define CC_DOLLAR 4 /* '$' */ #define CC_VARALPHA 5 /* '@', '#', ':'. Alphabetic SQL variables */ @@ -160122,20 +162363,21 @@ SQLITE_PRIVATE int sqlite3ParserFallback(int iToken){ #define CC_AND 24 /* '&' */ #define CC_TILDA 25 /* '~' */ #define CC_DOT 26 /* '.' */ -#define CC_ILLEGAL 27 /* Illegal character */ -#define CC_NUL 28 /* 0x00 */ +#define CC_ID 27 /* unicode characters usable in IDs */ +#define CC_ILLEGAL 28 /* Illegal character */ +#define CC_NUL 29 /* 0x00 */ static const unsigned char aiClass[] = { #ifdef SQLITE_ASCII /* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xa xb xc xd xe xf */ -/* 0x */ 28, 27, 27, 27, 27, 27, 27, 27, 27, 7, 7, 27, 7, 7, 27, 27, -/* 1x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, +/* 0x */ 29, 28, 28, 28, 28, 28, 28, 28, 28, 7, 7, 28, 7, 7, 28, 28, +/* 1x */ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, /* 2x */ 7, 15, 8, 5, 4, 22, 24, 8, 17, 18, 21, 20, 23, 11, 26, 16, /* 3x */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 19, 12, 14, 13, 6, /* 4x */ 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -/* 5x */ 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 9, 27, 27, 27, 1, +/* 5x */ 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 9, 28, 28, 28, 2, /* 6x */ 8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -/* 7x */ 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 27, 10, 27, 25, 27, +/* 7x */ 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 28, 10, 28, 25, 28, /* 8x */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, /* 9x */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, /* Ax */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, @@ -160147,22 +162389,22 @@ static const unsigned char aiClass[] = { #endif #ifdef SQLITE_EBCDIC /* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xa xb xc xd xe xf */ -/* 0x */ 27, 27, 27, 27, 27, 7, 27, 27, 27, 27, 27, 27, 7, 7, 27, 27, -/* 1x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, -/* 2x */ 27, 27, 27, 27, 27, 7, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, -/* 3x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, -/* 4x */ 7, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 26, 12, 17, 20, 10, -/* 5x */ 24, 27, 27, 27, 27, 27, 27, 
27, 27, 27, 15, 4, 21, 18, 19, 27, -/* 6x */ 11, 16, 27, 27, 27, 27, 27, 27, 27, 27, 27, 23, 22, 1, 13, 6, -/* 7x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 8, 5, 5, 5, 8, 14, 8, -/* 8x */ 27, 1, 1, 1, 1, 1, 1, 1, 1, 1, 27, 27, 27, 27, 27, 27, -/* 9x */ 27, 1, 1, 1, 1, 1, 1, 1, 1, 1, 27, 27, 27, 27, 27, 27, -/* Ax */ 27, 25, 1, 1, 1, 1, 1, 0, 1, 1, 27, 27, 27, 27, 27, 27, -/* Bx */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 9, 27, 27, 27, 27, 27, -/* Cx */ 27, 1, 1, 1, 1, 1, 1, 1, 1, 1, 27, 27, 27, 27, 27, 27, -/* Dx */ 27, 1, 1, 1, 1, 1, 1, 1, 1, 1, 27, 27, 27, 27, 27, 27, -/* Ex */ 27, 27, 1, 1, 1, 1, 1, 0, 1, 1, 27, 27, 27, 27, 27, 27, -/* Fx */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 27, 27, 27, 27, 27, 27, +/* 0x */ 29, 28, 28, 28, 28, 7, 28, 28, 28, 28, 28, 28, 7, 7, 28, 28, +/* 1x */ 28, 28, 28, 28, 28, 7, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, +/* 2x */ 28, 28, 28, 28, 28, 7, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, +/* 3x */ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, +/* 4x */ 7, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 26, 12, 17, 20, 10, +/* 5x */ 24, 28, 28, 28, 28, 28, 28, 28, 28, 28, 15, 4, 21, 18, 19, 28, +/* 6x */ 11, 16, 28, 28, 28, 28, 28, 28, 28, 28, 28, 23, 22, 2, 13, 6, +/* 7x */ 28, 28, 28, 28, 28, 28, 28, 28, 28, 8, 5, 5, 5, 8, 14, 8, +/* 8x */ 28, 1, 1, 1, 1, 1, 1, 1, 1, 1, 28, 28, 28, 28, 28, 28, +/* 9x */ 28, 1, 1, 1, 1, 1, 1, 1, 1, 1, 28, 28, 28, 28, 28, 28, +/* Ax */ 28, 25, 1, 1, 1, 1, 1, 0, 2, 2, 28, 28, 28, 28, 28, 28, +/* Bx */ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 9, 28, 28, 28, 28, 28, +/* Cx */ 28, 1, 1, 1, 1, 1, 1, 1, 1, 1, 28, 28, 28, 28, 28, 28, +/* Dx */ 28, 1, 1, 1, 1, 1, 1, 1, 1, 1, 28, 28, 28, 28, 28, 28, +/* Ex */ 28, 28, 1, 1, 1, 1, 1, 0, 2, 2, 28, 28, 28, 28, 28, 28, +/* Fx */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 28, 28, 28, 28, 28, 28, #endif }; @@ -160227,20 +162469,21 @@ const unsigned char ebcdicToAscii[] = { ** is substantially reduced. This is important for embedded applications ** on platforms with limited memory. 
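The regenerated tables above and below reflect two lexer changes in the bundled SQLite: the former CC_KYWD character class is split into CC_KYWD0 (characters that may begin a keyword) and CC_KYWD (characters usable anywhere inside one), and the keyword set grows from 145 to 147 entries with MATERIALIZED and RETURNING. A minimal sketch of the newly recognized RETURNING syntax driven through the public API; the table and column names are illustrative and db is assumed to be an open connection.

#include <stdio.h>
#include "sqlite3.h"

/* Illustrative only: insert a row and read back its rowid via RETURNING,
** which the regenerated keyword tables below tokenize as TK_RETURNING. */
static int insert_with_returning(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db,
      "INSERT INTO t(a) VALUES (?1) RETURNING rowid", -1, &pStmt, 0);
  if( rc==SQLITE_OK ){
    sqlite3_bind_int(pStmt, 1, 42);
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      printf("new rowid: %lld\n", (long long)sqlite3_column_int64(pStmt, 0));
    }
    rc = sqlite3_finalize(pStmt);
  }
  return rc;
}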
*/ -/* Hash score: 227 */ -/* zKWText[] encodes 984 bytes of keyword text in 648 bytes */ +/* Hash score: 231 */ +/* zKWText[] encodes 1007 bytes of keyword text in 667 bytes */ /* REINDEXEDESCAPEACHECKEYBEFOREIGNOREGEXPLAINSTEADDATABASELECT */ /* ABLEFTHENDEFERRABLELSEXCLUDELETEMPORARYISNULLSAVEPOINTERSECT */ /* IESNOTNULLIKEXCEPTRANSACTIONATURALTERAISEXCLUSIVEXISTS */ /* CONSTRAINTOFFSETRIGGERANGENERATEDETACHAVINGLOBEGINNEREFERENCES */ /* UNIQUERYWITHOUTERELEASEATTACHBETWEENOTHINGROUPSCASCADEFAULT */ /* CASECOLLATECREATECURRENT_DATEIMMEDIATEJOINSERTMATCHPLANALYZE */ -/* PRAGMABORTUPDATEVALUESVIRTUALWAYSWHENWHERECURSIVEAFTERENAMEAND */ -/* EFERREDISTINCTAUTOINCREMENTCASTCOLUMNCOMMITCONFLICTCROSS */ -/* CURRENT_TIMESTAMPARTITIONDROPRECEDINGFAILASTFILTEREPLACEFIRST */ -/* FOLLOWINGFROMFULLIMITIFORDERESTRICTOTHERSOVERIGHTROLLBACKROWS */ -/* UNBOUNDEDUNIONUSINGVACUUMVIEWINDOWBYINITIALLYPRIMARY */ -static const char zKWText[647] = { +/* PRAGMATERIALIZEDEFERREDISTINCTUPDATEVALUESVIRTUALWAYSWHENWHERE */ +/* CURSIVEABORTAFTERENAMEANDROPARTITIONAUTOINCREMENTCASTCOLUMN */ +/* COMMITCONFLICTCROSSCURRENT_TIMESTAMPRECEDINGFAILASTFILTER */ +/* EPLACEFIRSTFOLLOWINGFROMFULLIMITIFORDERESTRICTOTHERSOVER */ +/* ETURNINGRIGHTROLLBACKROWSUNBOUNDEDUNIONUSINGVACUUMVIEWINDOWBY */ +/* INITIALLYPRIMARY */ +static const char zKWText[666] = { 'R','E','I','N','D','E','X','E','D','E','S','C','A','P','E','A','C','H', 'E','C','K','E','Y','B','E','F','O','R','E','I','G','N','O','R','E','G', 'E','X','P','L','A','I','N','S','T','E','A','D','D','A','T','A','B','A', @@ -160261,86 +162504,87 @@ static const char zKWText[647] = { 'C','R','E','A','T','E','C','U','R','R','E','N','T','_','D','A','T','E', 'I','M','M','E','D','I','A','T','E','J','O','I','N','S','E','R','T','M', 'A','T','C','H','P','L','A','N','A','L','Y','Z','E','P','R','A','G','M', - 'A','B','O','R','T','U','P','D','A','T','E','V','A','L','U','E','S','V', - 'I','R','T','U','A','L','W','A','Y','S','W','H','E','N','W','H','E','R', - 'E','C','U','R','S','I','V','E','A','F','T','E','R','E','N','A','M','E', - 'A','N','D','E','F','E','R','R','E','D','I','S','T','I','N','C','T','A', - 'U','T','O','I','N','C','R','E','M','E','N','T','C','A','S','T','C','O', - 'L','U','M','N','C','O','M','M','I','T','C','O','N','F','L','I','C','T', - 'C','R','O','S','S','C','U','R','R','E','N','T','_','T','I','M','E','S', - 'T','A','M','P','A','R','T','I','T','I','O','N','D','R','O','P','R','E', - 'C','E','D','I','N','G','F','A','I','L','A','S','T','F','I','L','T','E', - 'R','E','P','L','A','C','E','F','I','R','S','T','F','O','L','L','O','W', - 'I','N','G','F','R','O','M','F','U','L','L','I','M','I','T','I','F','O', - 'R','D','E','R','E','S','T','R','I','C','T','O','T','H','E','R','S','O', - 'V','E','R','I','G','H','T','R','O','L','L','B','A','C','K','R','O','W', - 'S','U','N','B','O','U','N','D','E','D','U','N','I','O','N','U','S','I', - 'N','G','V','A','C','U','U','M','V','I','E','W','I','N','D','O','W','B', - 'Y','I','N','I','T','I','A','L','L','Y','P','R','I','M','A','R','Y', + 'A','T','E','R','I','A','L','I','Z','E','D','E','F','E','R','R','E','D', + 'I','S','T','I','N','C','T','U','P','D','A','T','E','V','A','L','U','E', + 'S','V','I','R','T','U','A','L','W','A','Y','S','W','H','E','N','W','H', + 'E','R','E','C','U','R','S','I','V','E','A','B','O','R','T','A','F','T', + 'E','R','E','N','A','M','E','A','N','D','R','O','P','A','R','T','I','T', + 'I','O','N','A','U','T','O','I','N','C','R','E','M','E','N','T','C','A', + 
'S','T','C','O','L','U','M','N','C','O','M','M','I','T','C','O','N','F', + 'L','I','C','T','C','R','O','S','S','C','U','R','R','E','N','T','_','T', + 'I','M','E','S','T','A','M','P','R','E','C','E','D','I','N','G','F','A', + 'I','L','A','S','T','F','I','L','T','E','R','E','P','L','A','C','E','F', + 'I','R','S','T','F','O','L','L','O','W','I','N','G','F','R','O','M','F', + 'U','L','L','I','M','I','T','I','F','O','R','D','E','R','E','S','T','R', + 'I','C','T','O','T','H','E','R','S','O','V','E','R','E','T','U','R','N', + 'I','N','G','R','I','G','H','T','R','O','L','L','B','A','C','K','R','O', + 'W','S','U','N','B','O','U','N','D','E','D','U','N','I','O','N','U','S', + 'I','N','G','V','A','C','U','U','M','V','I','E','W','I','N','D','O','W', + 'B','Y','I','N','I','T','I','A','L','L','Y','P','R','I','M','A','R','Y', }; /* aKWHash[i] is the hash value for the i-th keyword */ static const unsigned char aKWHash[127] = { - 84, 102, 132, 82, 114, 29, 0, 0, 91, 0, 85, 72, 0, - 53, 35, 86, 15, 0, 42, 94, 54, 126, 133, 19, 0, 0, - 138, 0, 40, 128, 0, 22, 104, 0, 9, 0, 0, 122, 80, - 0, 78, 6, 0, 65, 99, 145, 0, 134, 112, 0, 0, 48, - 0, 100, 24, 0, 17, 0, 27, 70, 23, 26, 5, 60, 140, - 107, 121, 0, 73, 101, 71, 143, 61, 119, 74, 0, 49, 0, - 11, 41, 0, 110, 0, 0, 0, 106, 10, 108, 113, 124, 14, - 50, 123, 0, 89, 0, 18, 120, 142, 56, 129, 137, 88, 83, - 37, 30, 125, 0, 0, 105, 51, 130, 127, 0, 34, 0, 0, - 44, 0, 95, 38, 39, 0, 20, 45, 116, 90, + 84, 92, 134, 82, 105, 29, 0, 0, 94, 0, 85, 72, 0, + 53, 35, 86, 15, 0, 42, 97, 54, 89, 135, 19, 0, 0, + 140, 0, 40, 129, 0, 22, 107, 0, 9, 0, 0, 123, 80, + 0, 78, 6, 0, 65, 103, 147, 0, 136, 115, 0, 0, 48, + 0, 90, 24, 0, 17, 0, 27, 70, 23, 26, 5, 60, 142, + 110, 122, 0, 73, 91, 71, 145, 61, 120, 74, 0, 49, 0, + 11, 41, 0, 113, 0, 0, 0, 109, 10, 111, 116, 125, 14, + 50, 124, 0, 100, 0, 18, 121, 144, 56, 130, 139, 88, 83, + 37, 30, 126, 0, 0, 108, 51, 131, 128, 0, 34, 0, 0, + 132, 0, 98, 38, 39, 0, 20, 45, 117, 93, }; /* aKWNext[] forms the hash collision chain. If aKWHash[i]==0 ** then the i-th keyword has no more hash collisions. Otherwise, ** the next keyword with the same hash is aKWHash[i]-1. 
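For orientation, a rough sketch of how the lookup consumes these tables, mirroring the keywordCode() logic later in this hunk; sqlite3StrNICmp stands in for the byte-wise case folding the real function inlines, so treat this as an approximation rather than the shipped code.

/* Approximate probe of the generated tables (cf. keywordCode() below).
** Returns the TK_* parser code for z[0..n-1], or TK_ID if not a keyword. */
static int keyword_token(const char *z, int n){
  int i = ((charMap(z[0])*4) ^ (charMap(z[n-1])*3) ^ n*1) % 127; /* bucket */
  for(i=((int)aKWHash[i])-1; i>=0; i=((int)aKWNext[i])-1){       /* chain  */
    if( aKWLen[i]!=n ) continue;               /* lengths must match first */
    if( sqlite3StrNICmp(&zKWText[aKWOffset[i]], z, n)==0 ){
      return aKWCode[i];                       /* e.g. TK_RETURNING */
    }
  }
  return TK_ID;                                /* plain identifier */
}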
*/ -static const unsigned char aKWNext[145] = { - 0, 0, 0, 0, 4, 0, 43, 0, 0, 103, 111, 0, 0, - 0, 2, 0, 0, 141, 0, 0, 0, 13, 0, 0, 0, 0, - 139, 0, 0, 118, 52, 0, 0, 135, 12, 0, 0, 62, 0, - 136, 0, 131, 0, 0, 36, 0, 0, 28, 77, 0, 0, 0, +static const unsigned char aKWNext[147] = { + 0, 0, 0, 0, 4, 0, 43, 0, 0, 106, 114, 0, 0, + 0, 2, 0, 0, 143, 0, 0, 0, 13, 0, 0, 0, 0, + 141, 0, 0, 119, 52, 0, 0, 137, 12, 0, 0, 62, 0, + 138, 0, 133, 0, 0, 36, 0, 0, 28, 77, 0, 0, 0, 0, 59, 0, 47, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 69, 0, 0, 0, 0, 0, 144, 3, 0, 58, 0, 1, - 75, 0, 0, 0, 31, 0, 0, 0, 0, 0, 0, 64, 66, - 63, 0, 0, 0, 0, 46, 0, 16, 0, 115, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 81, 97, 0, 8, 0, 109, - 21, 7, 67, 0, 79, 93, 117, 0, 0, 68, 0, 0, 96, - 0, 55, 0, 76, 0, 92, 32, 33, 57, 25, 0, 98, 0, - 0, 87, + 0, 69, 0, 0, 0, 0, 0, 146, 3, 0, 58, 0, 1, + 75, 0, 0, 0, 31, 0, 0, 0, 0, 0, 127, 0, 104, + 0, 64, 66, 63, 0, 0, 0, 0, 0, 46, 0, 16, 8, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 81, 101, 0, + 112, 21, 7, 67, 0, 79, 96, 118, 0, 0, 68, 0, 0, + 99, 44, 0, 55, 0, 76, 0, 95, 32, 33, 57, 25, 0, + 102, 0, 0, 87, }; /* aKWLen[i] is the length (in bytes) of the i-th keyword */ -static const unsigned char aKWLen[145] = { +static const unsigned char aKWLen[147] = { 7, 7, 5, 4, 6, 4, 5, 3, 6, 7, 3, 6, 6, 7, 7, 3, 8, 2, 6, 5, 4, 4, 3, 10, 4, 7, 6, 9, 4, 2, 6, 5, 9, 9, 4, 7, 3, 2, 4, 4, 6, 11, 6, 2, 7, 5, 5, 9, 6, 10, 4, 6, 2, 3, 7, 5, 9, 6, 6, 4, 5, 5, 10, 6, 5, 7, 4, 5, 7, 6, 7, 7, 6, 5, 7, 3, 7, 4, - 7, 6, 12, 9, 4, 6, 5, 4, 7, 6, 5, 6, 6, - 7, 6, 4, 5, 9, 5, 6, 3, 8, 8, 2, 13, 2, - 2, 4, 6, 6, 8, 5, 17, 12, 7, 9, 4, 9, 4, - 4, 6, 7, 5, 9, 4, 4, 5, 2, 5, 8, 6, 4, - 5, 8, 4, 3, 9, 5, 5, 6, 4, 6, 2, 2, 9, - 3, 7, + 7, 6, 12, 9, 4, 6, 5, 4, 7, 6, 12, 8, 8, + 2, 6, 6, 7, 6, 4, 5, 9, 5, 5, 6, 3, 4, + 9, 13, 2, 2, 4, 6, 6, 8, 5, 17, 12, 7, 9, + 4, 4, 6, 7, 5, 9, 4, 4, 5, 2, 5, 8, 6, + 4, 9, 5, 8, 4, 3, 9, 5, 5, 6, 4, 6, 2, + 2, 9, 3, 7, }; /* aKWOffset[i] is the index into zKWText[] of the start of ** the text for the i-th keyword. 
*/ -static const unsigned short int aKWOffset[145] = { +static const unsigned short int aKWOffset[147] = { 0, 2, 2, 8, 9, 14, 16, 20, 23, 25, 25, 29, 33, 36, 41, 46, 48, 53, 54, 59, 62, 65, 67, 69, 78, 81, 86, 90, 90, 94, 99, 101, 105, 111, 119, 123, 123, 123, 126, 129, 132, 137, 142, 146, 147, 152, 156, 160, 168, 174, 181, 184, 184, 187, 189, 195, 198, 206, 211, 216, 219, 222, 226, 236, 239, 244, 244, 248, 252, 259, 265, 271, 277, 277, 283, 284, 288, 295, - 299, 306, 312, 324, 333, 335, 341, 346, 348, 355, 360, 365, 371, - 377, 382, 388, 392, 395, 404, 408, 414, 416, 423, 424, 431, 433, - 435, 444, 448, 454, 460, 468, 473, 473, 473, 489, 498, 501, 510, - 513, 517, 522, 529, 534, 543, 547, 550, 555, 557, 561, 569, 575, - 578, 583, 591, 591, 595, 604, 609, 614, 620, 623, 626, 629, 631, - 636, 640, + 299, 306, 312, 324, 333, 335, 341, 346, 348, 355, 359, 370, 377, + 378, 385, 391, 397, 402, 408, 412, 415, 424, 429, 433, 439, 441, + 444, 453, 455, 457, 466, 470, 476, 482, 490, 495, 495, 495, 511, + 520, 523, 527, 532, 539, 544, 553, 557, 560, 565, 567, 571, 579, + 585, 588, 597, 602, 610, 610, 614, 623, 628, 633, 639, 642, 645, + 648, 650, 655, 659, }; /* aKWCode[i] is the parser symbol code for the i-th keyword */ -static const unsigned char aKWCode[145] = { +static const unsigned char aKWCode[147] = { TK_REINDEX, TK_INDEXED, TK_INDEX, TK_DESC, TK_ESCAPE, TK_EACH, TK_CHECK, TK_KEY, TK_BEFORE, TK_FOREIGN, TK_FOR, TK_IGNORE, TK_LIKE_KW, TK_EXPLAIN, TK_INSTEAD, @@ -160358,18 +162602,19 @@ static const unsigned char aKWCode[145] = { TK_BETWEEN, TK_NOTHING, TK_GROUPS, TK_GROUP, TK_CASCADE, TK_ASC, TK_DEFAULT, TK_CASE, TK_COLLATE, TK_CREATE, TK_CTIME_KW, TK_IMMEDIATE, TK_JOIN, TK_INSERT, TK_MATCH, - TK_PLAN, TK_ANALYZE, TK_PRAGMA, TK_ABORT, TK_UPDATE, - TK_VALUES, TK_VIRTUAL, TK_ALWAYS, TK_WHEN, TK_WHERE, - TK_RECURSIVE, TK_AFTER, TK_RENAME, TK_AND, TK_DEFERRED, - TK_DISTINCT, TK_IS, TK_AUTOINCR, TK_TO, TK_IN, - TK_CAST, TK_COLUMNKW, TK_COMMIT, TK_CONFLICT, TK_JOIN_KW, - TK_CTIME_KW, TK_CTIME_KW, TK_CURRENT, TK_PARTITION, TK_DROP, - TK_PRECEDING, TK_FAIL, TK_LAST, TK_FILTER, TK_REPLACE, - TK_FIRST, TK_FOLLOWING, TK_FROM, TK_JOIN_KW, TK_LIMIT, - TK_IF, TK_ORDER, TK_RESTRICT, TK_OTHERS, TK_OVER, - TK_JOIN_KW, TK_ROLLBACK, TK_ROWS, TK_ROW, TK_UNBOUNDED, - TK_UNION, TK_USING, TK_VACUUM, TK_VIEW, TK_WINDOW, - TK_DO, TK_BY, TK_INITIALLY, TK_ALL, TK_PRIMARY, + TK_PLAN, TK_ANALYZE, TK_PRAGMA, TK_MATERIALIZED, TK_DEFERRED, + TK_DISTINCT, TK_IS, TK_UPDATE, TK_VALUES, TK_VIRTUAL, + TK_ALWAYS, TK_WHEN, TK_WHERE, TK_RECURSIVE, TK_ABORT, + TK_AFTER, TK_RENAME, TK_AND, TK_DROP, TK_PARTITION, + TK_AUTOINCR, TK_TO, TK_IN, TK_CAST, TK_COLUMNKW, + TK_COMMIT, TK_CONFLICT, TK_JOIN_KW, TK_CTIME_KW, TK_CTIME_KW, + TK_CURRENT, TK_PRECEDING, TK_FAIL, TK_LAST, TK_FILTER, + TK_REPLACE, TK_FIRST, TK_FOLLOWING, TK_FROM, TK_JOIN_KW, + TK_LIMIT, TK_IF, TK_ORDER, TK_RESTRICT, TK_OTHERS, + TK_OVER, TK_RETURNING, TK_JOIN_KW, TK_ROLLBACK, TK_ROWS, + TK_ROW, TK_UNBOUNDED, TK_UNION, TK_USING, TK_VACUUM, + TK_VIEW, TK_WINDOW, TK_DO, TK_BY, TK_INITIALLY, + TK_ALL, TK_PRIMARY, }; /* Hash table decoded: ** 0: INSERT @@ -160393,7 +162638,7 @@ static const unsigned char aKWCode[145] = { ** 18: TRANSACTION RIGHT ** 19: WHEN ** 20: SET HAVING -** 21: IF +** 21: MATERIALIZED IF ** 22: ROWS ** 23: SELECT ** 24: @@ -160489,7 +162734,7 @@ static const unsigned char aKWCode[145] = { ** 114: INTERSECT UNBOUNDED ** 115: ** 116: -** 117: ON +** 117: RETURNING ON ** 118: ** 119: WHERE ** 120: NO INNER @@ -160507,7 +162752,7 @@ static int 
keywordCode(const char *z, int n, int *pType){ int i, j; const char *zKW; if( n>=2 ){ - i = ((charMap(z[0])*4) ^ (charMap(z[n-1])*3) ^ n) % 127; + i = ((charMap(z[0])*4) ^ (charMap(z[n-1])*3) ^ n*1) % 127; for(i=((int)aKWHash[i])-1; i>=0; i=((int)aKWNext[i])-1){ if( aKWLen[i]!=n ) continue; zKW = &zKWText[aKWOffset[i]]; @@ -160612,63 +162857,65 @@ static int keywordCode(const char *z, int n, int *pType){ testcase( i==85 ); /* PLAN */ testcase( i==86 ); /* ANALYZE */ testcase( i==87 ); /* PRAGMA */ - testcase( i==88 ); /* ABORT */ - testcase( i==89 ); /* UPDATE */ - testcase( i==90 ); /* VALUES */ - testcase( i==91 ); /* VIRTUAL */ - testcase( i==92 ); /* ALWAYS */ - testcase( i==93 ); /* WHEN */ - testcase( i==94 ); /* WHERE */ - testcase( i==95 ); /* RECURSIVE */ - testcase( i==96 ); /* AFTER */ - testcase( i==97 ); /* RENAME */ - testcase( i==98 ); /* AND */ - testcase( i==99 ); /* DEFERRED */ - testcase( i==100 ); /* DISTINCT */ - testcase( i==101 ); /* IS */ - testcase( i==102 ); /* AUTOINCREMENT */ - testcase( i==103 ); /* TO */ - testcase( i==104 ); /* IN */ - testcase( i==105 ); /* CAST */ - testcase( i==106 ); /* COLUMN */ - testcase( i==107 ); /* COMMIT */ - testcase( i==108 ); /* CONFLICT */ - testcase( i==109 ); /* CROSS */ - testcase( i==110 ); /* CURRENT_TIMESTAMP */ - testcase( i==111 ); /* CURRENT_TIME */ - testcase( i==112 ); /* CURRENT */ - testcase( i==113 ); /* PARTITION */ - testcase( i==114 ); /* DROP */ - testcase( i==115 ); /* PRECEDING */ - testcase( i==116 ); /* FAIL */ - testcase( i==117 ); /* LAST */ - testcase( i==118 ); /* FILTER */ - testcase( i==119 ); /* REPLACE */ - testcase( i==120 ); /* FIRST */ - testcase( i==121 ); /* FOLLOWING */ - testcase( i==122 ); /* FROM */ - testcase( i==123 ); /* FULL */ - testcase( i==124 ); /* LIMIT */ - testcase( i==125 ); /* IF */ - testcase( i==126 ); /* ORDER */ - testcase( i==127 ); /* RESTRICT */ - testcase( i==128 ); /* OTHERS */ - testcase( i==129 ); /* OVER */ - testcase( i==130 ); /* RIGHT */ - testcase( i==131 ); /* ROLLBACK */ - testcase( i==132 ); /* ROWS */ - testcase( i==133 ); /* ROW */ - testcase( i==134 ); /* UNBOUNDED */ - testcase( i==135 ); /* UNION */ - testcase( i==136 ); /* USING */ - testcase( i==137 ); /* VACUUM */ - testcase( i==138 ); /* VIEW */ - testcase( i==139 ); /* WINDOW */ - testcase( i==140 ); /* DO */ - testcase( i==141 ); /* BY */ - testcase( i==142 ); /* INITIALLY */ - testcase( i==143 ); /* ALL */ - testcase( i==144 ); /* PRIMARY */ + testcase( i==88 ); /* MATERIALIZED */ + testcase( i==89 ); /* DEFERRED */ + testcase( i==90 ); /* DISTINCT */ + testcase( i==91 ); /* IS */ + testcase( i==92 ); /* UPDATE */ + testcase( i==93 ); /* VALUES */ + testcase( i==94 ); /* VIRTUAL */ + testcase( i==95 ); /* ALWAYS */ + testcase( i==96 ); /* WHEN */ + testcase( i==97 ); /* WHERE */ + testcase( i==98 ); /* RECURSIVE */ + testcase( i==99 ); /* ABORT */ + testcase( i==100 ); /* AFTER */ + testcase( i==101 ); /* RENAME */ + testcase( i==102 ); /* AND */ + testcase( i==103 ); /* DROP */ + testcase( i==104 ); /* PARTITION */ + testcase( i==105 ); /* AUTOINCREMENT */ + testcase( i==106 ); /* TO */ + testcase( i==107 ); /* IN */ + testcase( i==108 ); /* CAST */ + testcase( i==109 ); /* COLUMN */ + testcase( i==110 ); /* COMMIT */ + testcase( i==111 ); /* CONFLICT */ + testcase( i==112 ); /* CROSS */ + testcase( i==113 ); /* CURRENT_TIMESTAMP */ + testcase( i==114 ); /* CURRENT_TIME */ + testcase( i==115 ); /* CURRENT */ + testcase( i==116 ); /* PRECEDING */ + testcase( i==117 ); /* FAIL */ + testcase( 
i==118 ); /* LAST */ + testcase( i==119 ); /* FILTER */ + testcase( i==120 ); /* REPLACE */ + testcase( i==121 ); /* FIRST */ + testcase( i==122 ); /* FOLLOWING */ + testcase( i==123 ); /* FROM */ + testcase( i==124 ); /* FULL */ + testcase( i==125 ); /* LIMIT */ + testcase( i==126 ); /* IF */ + testcase( i==127 ); /* ORDER */ + testcase( i==128 ); /* RESTRICT */ + testcase( i==129 ); /* OTHERS */ + testcase( i==130 ); /* OVER */ + testcase( i==131 ); /* RETURNING */ + testcase( i==132 ); /* RIGHT */ + testcase( i==133 ); /* ROLLBACK */ + testcase( i==134 ); /* ROWS */ + testcase( i==135 ); /* ROW */ + testcase( i==136 ); /* UNBOUNDED */ + testcase( i==137 ); /* UNION */ + testcase( i==138 ); /* USING */ + testcase( i==139 ); /* VACUUM */ + testcase( i==140 ); /* VIEW */ + testcase( i==141 ); /* WINDOW */ + testcase( i==142 ); /* DO */ + testcase( i==143 ); /* BY */ + testcase( i==144 ); /* INITIALLY */ + testcase( i==145 ); /* ALL */ + testcase( i==146 ); /* PRIMARY */ *pType = aKWCode[i]; break; } @@ -160680,7 +162927,7 @@ SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char *z, int n){ keywordCode((char*)z, n, &id); return id; } -#define SQLITE_N_KEYWORD 145 +#define SQLITE_N_KEYWORD 147 SQLITE_API int sqlite3_keyword_name(int i,const char **pzName,int *pnName){ if( i<0 || i>=SQLITE_N_KEYWORD ) return SQLITE_ERROR; *pzName = zKWText + aKWOffset[i]; @@ -161049,7 +163296,7 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ if( n==0 ) *tokenType = TK_ILLEGAL; return i; } - case CC_KYWD: { + case CC_KYWD0: { for(i=1; aiClass[z[i]]<=CC_KYWD; i++){} if( IdChar(z[i]) ){ /* This token started out using characters that can appear in keywords, @@ -161079,6 +163326,7 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ ** SQL keywords start with the letter 'x'. Fall through */ /* no break */ deliberate_fall_through } + case CC_KYWD: case CC_ID: { i = 1; break; @@ -161261,19 +163509,7 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr if( !IN_RENAME_OBJECT ){ sqlite3DeleteTrigger(db, pParse->pNewTrigger); } - - if( pParse->pWithToFree ) sqlite3WithDelete(db, pParse->pWithToFree); sqlite3DbFree(db, pParse->pVList); - while( pParse->pAinc ){ - AutoincInfo *p = pParse->pAinc; - pParse->pAinc = p->pNext; - sqlite3DbFreeNN(db, p); - } - while( pParse->pZombieTab ){ - Table *p = pParse->pZombieTab; - pParse->pZombieTab = p->pNextZombie; - sqlite3DeleteTable(db, p); - } db->pParse = pParse->pParentParse; pParse->pParentParse = 0; assert( nErr==0 || pParse->rc!=SQLITE_OK ); @@ -164180,7 +166416,7 @@ SQLITE_API int sqlite3_wal_checkpoint_v2( return SQLITE_OK; #else int rc; /* Return code */ - int iDb = SQLITE_MAX_ATTACHED; /* sqlite3.aDb[] index of db to checkpoint */ + int iDb; /* Schema to checkpoint */ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; @@ -164203,6 +166439,8 @@ SQLITE_API int sqlite3_wal_checkpoint_v2( sqlite3_mutex_enter(db->mutex); if( zDb && zDb[0] ){ iDb = sqlite3FindDbName(db, zDb); + }else{ + iDb = SQLITE_MAX_DB; /* This means process all schemas */ } if( iDb<0 ){ rc = SQLITE_ERROR; @@ -164251,7 +166489,7 @@ SQLITE_API int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb){ ** associated with the specific b-tree being checkpointed is taken by ** this function while the checkpoint is running. ** -** If iDb is passed SQLITE_MAX_ATTACHED, then all attached databases are +** If iDb is passed SQLITE_MAX_DB then all attached databases are ** checkpointed. 
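A functional change threaded through the next few hunks: the sentinel meaning "checkpoint every attached schema" moves from SQLITE_MAX_ATTACHED to SQLITE_MAX_DB. Callers reach it by passing a NULL schema name to the public API; a minimal sketch, assuming an open WAL-mode connection db:

/* zDb==0 selects all attached databases, which now maps to
** iDb==SQLITE_MAX_DB inside sqlite3Checkpoint(). nLog/nCkpt are only
** meaningful when a single schema is named. */
int nLog = 0, nCkpt = 0;
int rc = sqlite3_wal_checkpoint_v2(db, 0, SQLITE_CHECKPOINT_TRUNCATE,
                                   &nLog, &nCkpt);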
If an error is encountered it is returned immediately - ** no attempt is made to checkpoint any remaining databases. ** @@ -164266,9 +166504,11 @@ SQLITE_PRIVATE int sqlite3Checkpoint(sqlite3 *db, int iDb, int eMode, int *pnLog assert( sqlite3_mutex_held(db->mutex) ); assert( !pnLog || *pnLog==-1 ); assert( !pnCkpt || *pnCkpt==-1 ); + testcase( iDb==SQLITE_MAX_ATTACHED ); /* See forum post a006d86f72 */ + testcase( iDb==SQLITE_MAX_DB ); for(i=0; i<db->nDb && rc==SQLITE_OK; i++){ - if( i==iDb || iDb==SQLITE_MAX_ATTACHED ){ + if( i==iDb || iDb==SQLITE_MAX_DB ){ rc = sqlite3BtreeCheckpoint(db->aDb[i].pBt, eMode, pnLog, pnCkpt); pnLog = 0; pnCkpt = 0; @@ -165886,7 +168126,7 @@ SQLITE_API int sqlite3_test_control(int op, ...){ */ case SQLITE_TESTCTRL_OPTIMIZATIONS: { sqlite3 *db = va_arg(ap, sqlite3*); - db->dbOptFlags = (u16)(va_arg(ap, int) & 0xffff); + db->dbOptFlags = va_arg(ap, u32); break; } @@ -166061,7 +168301,26 @@ SQLITE_API int sqlite3_test_control(int op, ...){ break; } - + /* sqlite3_test_control(SQLITE_TESTCTRL_TRACEFLAGS, op, ptr) + ** + ** "ptr" is a pointer to a u32. + ** + ** op==0 Store the current sqlite3SelectTrace in *ptr + ** op==1 Set sqlite3SelectTrace to the value *ptr + ** op==2 Store the current sqlite3WhereTrace in *ptr + ** op==3 Set sqlite3WhereTrace to the value *ptr + */ + case SQLITE_TESTCTRL_TRACEFLAGS: { + int opTrace = va_arg(ap, int); + u32 *ptr = va_arg(ap, u32*); + switch( opTrace ){ + case 0: *ptr = sqlite3SelectTrace; break; + case 1: sqlite3SelectTrace = *ptr; break; + case 2: *ptr = sqlite3WhereTrace; break; + case 3: sqlite3WhereTrace = *ptr; break; + } + break; + } } va_end(ap); #endif /* SQLITE_UNTESTABLE */ @@ -172946,9 +175205,9 @@ static int fts3EvalNearTrim( ); if( res ){ nNew = (int)(pOut - pPhrase->doclist.pList) - 1; - if( nNew>=0 ){ + assert_fts3_nc( nNew<=pPhrase->doclist.nList && nNew>0 ); + if( nNew>=0 && nNew<=pPhrase->doclist.nList ){ assert( pPhrase->doclist.pList[nNew]=='\0' ); - assert( nNew<=pPhrase->doclist.nList && nNew>0 ); memset(&pPhrase->doclist.pList[nNew], 0, pPhrase->doclist.nList - nNew); pPhrase->doclist.nList = nNew; } @@ -174882,6 +177141,11 @@ static int getNextNode( if( *zInput=='(' ){ int nConsumed = 0; pParse->nNest++; +#if !defined(SQLITE_MAX_EXPR_DEPTH) + if( pParse->nNest>1000 ) return SQLITE_ERROR; +#elif SQLITE_MAX_EXPR_DEPTH>0 + if( pParse->nNest>SQLITE_MAX_EXPR_DEPTH ) return SQLITE_ERROR; +#endif rc = fts3ExprParse(pParse, zInput+1, nInput-1, ppExpr, &nConsumed); *pnConsumed = (int)(zInput - z) + 1 + nConsumed; return rc; @@ -182285,17 +184549,20 @@ static int fts3IncrmergeLoad( while( reader.aNode && rc==SQLITE_OK ) rc = nodeReaderNext(&reader); blobGrowBuffer(&pNode->key, reader.term.n, &rc); if( rc==SQLITE_OK ){ - memcpy(pNode->key.a, reader.term.a, reader.term.n); + assert_fts3_nc( reader.term.n>0 || reader.aNode==0 ); + if( reader.term.n>0 ){ + memcpy(pNode->key.a, reader.term.a, reader.term.n); + } pNode->key.n = reader.term.n; if( i>0 ){ char *aBlock = 0; int nBlock = 0; pNode = &pWriter->aNodeWriter[i-1]; pNode->iBlock = reader.iChild; - rc = sqlite3Fts3ReadBlock(p, reader.iChild, &aBlock, &nBlock, 0); + rc = sqlite3Fts3ReadBlock(p, reader.iChild, &aBlock, &nBlock,0); blobGrowBuffer(&pNode->block, MAX(nBlock, p->nNodeSize)+FTS3_NODE_PADDING, &rc - ); + ); if( rc==SQLITE_OK ){ memcpy(pNode->block.a, aBlock, nBlock); pNode->block.n = nBlock; @@ -185783,6 +188050,7 @@ static int unicodeOpen( pCsr->aInput = (const unsigned char *)aInput; if( aInput==0 ){ pCsr->nInput = 0; + pCsr->aInput = (const
unsigned char*)""; }else if( nInput<0 ){ pCsr->nInput = (int)strlen(aInput); }else{ @@ -201538,22 +203806,24 @@ static int rbuVfsShmLock(sqlite3_file *pFile, int ofst, int n, int flags){ #endif assert( p->openFlags & (SQLITE_OPEN_MAIN_DB|SQLITE_OPEN_TEMP_DB) ); - if( pRbu && (pRbu->eStage==RBU_STAGE_OAL || pRbu->eStage==RBU_STAGE_MOVE) ){ - /* Magic number 1 is the WAL_CKPT_LOCK lock. Preventing SQLite from - ** taking this lock also prevents any checkpoints from occurring. - ** todo: really, it's not clear why this might occur, as - ** wal_autocheckpoint ought to be turned off. */ + if( pRbu && ( + pRbu->eStage==RBU_STAGE_OAL + || pRbu->eStage==RBU_STAGE_MOVE + || pRbu->eStage==RBU_STAGE_DONE + )){ + /* Prevent SQLite from taking a shm-lock on the target file when it + ** is supplying heap memory to the upper layer in place of *-shm + ** segments. */ if( ofst==WAL_LOCK_CKPT && n==1 ) rc = SQLITE_BUSY; }else{ int bCapture = 0; if( pRbu && pRbu->eStage==RBU_STAGE_CAPTURE ){ bCapture = 1; } - if( bCapture==0 || 0==(flags & SQLITE_SHM_UNLOCK) ){ rc = p->pReal->pMethods->xShmLock(p->pReal, ofst, n, flags); if( bCapture && rc==SQLITE_OK ){ - pRbu->mLock |= (1 << ofst); + pRbu->mLock |= ((1<<n) - 1) << ofst; } } } @@ -203340,6 +205610,7 @@ struct sqlite3_session { int rc; /* Non-zero if an error has occurred */ void *pFilterCtx; /* First argument to pass to xTableFilter */ int (*xTableFilter)(void *pCtx, const char *zTab); + i64 nMalloc; /* Number of bytes of data allocated */ sqlite3_value *pZeroBlob; /* Value containing X'' */ sqlite3_session *pNext; /* Next session object on same db. */ SessionTable *pTable; /* List of attached tables */ @@ -203382,6 +205653,7 @@ struct sqlite3_changeset_iter { SessionBuffer tblhdr; /* Buffer to hold apValue/zTab/abPK/ */ int bPatchset; /* True if this is a patchset */ int bInvert; /* True to invert changeset */ + int bSkipEmpty; /* Skip noop UPDATE changes */ int rc; /* Iterator error code */ sqlite3_stmt *pConflict; /* Points to conflicting row, if any */ char *zTab; /* Current table */ @@ -203723,6 +205995,26 @@ static int sessionSerializeValue( return SQLITE_OK; } +/* +** Allocate and return a pointer to a buffer nByte bytes in size. If +** pSession is not NULL, increase the sqlite3_session.nMalloc variable +** by the number of bytes allocated. +*/ +static void *sessionMalloc64(sqlite3_session *pSession, i64 nByte){ + void *pRet = sqlite3_malloc64(nByte); + if( pSession ) pSession->nMalloc += sqlite3_msize(pRet); + return pRet; +} + +/* +** Free buffer pFree, which must have been allocated by an earlier +** call to sessionMalloc64(). If pSession is not NULL, decrease the +** sqlite3_session.nMalloc counter by the number of bytes freed. +*/ +static void sessionFree(sqlite3_session *pSession, void *pFree){ + if( pSession ) pSession->nMalloc -= sqlite3_msize(pFree); + sqlite3_free(pFree); +} /* ** This macro is used to calculate hash key values for data structures. In @@ -204190,13 +206482,19 @@ static int sessionPreupdateEqual( ** Growing the hash table in this case is a performance optimization only, ** it is not required for correct operation. */ -static int sessionGrowHash(int bPatchset, SessionTable *pTab){ +static int sessionGrowHash( + sqlite3_session *pSession, /* For memory accounting. May be NULL */ + int bPatchset, + SessionTable *pTab +){ if( pTab->nChange==0 || pTab->nEntry>=(pTab->nChange/2) ){ int i; SessionChange **apNew; sqlite3_int64 nNew = 2*(sqlite3_int64)(pTab->nChange ? 
pTab->nChange : 128); - apNew = (SessionChange **)sqlite3_malloc64(sizeof(SessionChange *) * nNew); + apNew = (SessionChange**)sessionMalloc64( + pSession, sizeof(SessionChange*) * nNew + ); if( apNew==0 ){ if( pTab->nChange==0 ){ return SQLITE_ERROR; @@ -204217,7 +206515,7 @@ static int sessionGrowHash(int bPatchset, SessionTable *pTab){ } } - sqlite3_free(pTab->apChange); + sessionFree(pSession, pTab->apChange); pTab->nChange = nNew; pTab->apChange = apNew; } @@ -204251,6 +206549,7 @@ static int sessionGrowHash(int bPatchset, SessionTable *pTab){ ** be freed using sqlite3_free() by the caller */ static int sessionTableInfo( + sqlite3_session *pSession, /* For memory accounting. May be NULL */ sqlite3 *db, /* Database connection */ const char *zDb, /* Name of attached database (e.g. "main") */ const char *zThis, /* Table name */ @@ -204305,7 +206604,7 @@ static int sessionTableInfo( if( rc==SQLITE_OK ){ nByte += nDbCol * (sizeof(const char *) + sizeof(u8) + 1); - pAlloc = sqlite3_malloc64(nByte); + pAlloc = sessionMalloc64(pSession, nByte); if( pAlloc==0 ){ rc = SQLITE_NOMEM; } @@ -204348,7 +206647,7 @@ static int sessionTableInfo( *pabPK = 0; *pnCol = 0; if( pzTab ) *pzTab = 0; - sqlite3_free(azCol); + sessionFree(pSession, azCol); } sqlite3_finalize(pStmt); return rc; @@ -204370,7 +206669,7 @@ static int sessionInitTable(sqlite3_session *pSession, SessionTable *pTab){ if( pTab->nCol==0 ){ u8 *abPK; assert( pTab->azCol==0 || pTab->abPK==0 ); - pSession->rc = sessionTableInfo(pSession->db, pSession->zDb, + pSession->rc = sessionTableInfo(pSession, pSession->db, pSession->zDb, pTab->zName, &pTab->nCol, 0, &pTab->azCol, &abPK ); if( pSession->rc==SQLITE_OK ){ @@ -204461,7 +206760,7 @@ static void sessionPreupdateOneChange( } /* Grow the hash table if required */ - if( sessionGrowHash(0, pTab) ){ + if( sessionGrowHash(pSession, 0, pTab) ){ pSession->rc = SQLITE_NOMEM; return; } @@ -204528,7 +206827,7 @@ static void sessionPreupdateOneChange( } /* Allocate the change object */ - pChange = (SessionChange *)sqlite3_malloc64(nByte); + pChange = (SessionChange *)sessionMalloc64(pSession, nByte); if( !pChange ){ rc = SQLITE_NOMEM; goto error_out; @@ -204901,7 +207200,7 @@ SQLITE_API int sqlite3session_diff( int nCol; /* Columns in zFrom.zTbl */ u8 *abPK; const char **azCol = 0; - rc = sessionTableInfo(db, zFrom, zTbl, &nCol, 0, &azCol, &abPK); + rc = sessionTableInfo(0, db, zFrom, zTbl, &nCol, 0, &azCol, &abPK); if( rc==SQLITE_OK ){ if( pTo->nCol!=nCol ){ bMismatch = 1; @@ -204999,7 +207298,7 @@ SQLITE_API int sqlite3session_create( ** Free the list of table objects passed as the first argument. The contents ** of the changed-rows hash tables are also deleted. */ -static void sessionDeleteTable(SessionTable *pList){ +static void sessionDeleteTable(sqlite3_session *pSession, SessionTable *pList){ SessionTable *pNext; SessionTable *pTab; @@ -205011,12 +207310,12 @@ static void sessionDeleteTable(SessionTable *pList){ SessionChange *pNextChange; for(p=pTab->apChange[i]; p; p=pNextChange){ pNextChange = p->pNext; - sqlite3_free(p); + sessionFree(pSession, p); } } - sqlite3_free((char*)pTab->azCol); /* cast works around VC++ bug */ - sqlite3_free(pTab->apChange); - sqlite3_free(pTab); + sessionFree(pSession, (char*)pTab->azCol); /* cast works around VC++ bug */ + sessionFree(pSession, pTab->apChange); + sessionFree(pSession, pTab); } } @@ -205044,9 +207343,11 @@ SQLITE_API void sqlite3session_delete(sqlite3_session *pSession){ /* Delete all attached table objects. 
And the contents of their ** associated hash-tables. */ - sessionDeleteTable(pSession->pTable); + sessionDeleteTable(pSession, pSession->pTable); - /* Free the session object itself. */ + /* Assert that all allocations have been freed and then free the + ** session object itself. */ + assert( pSession->nMalloc==0 ); sqlite3_free(pSession); } @@ -205093,7 +207394,8 @@ SQLITE_API int sqlite3session_attach( if( !pTab ){ /* Allocate new SessionTable object. */ - pTab = (SessionTable *)sqlite3_malloc64(sizeof(SessionTable) + nName + 1); + int nByte = sizeof(SessionTable) + nName + 1; + pTab = (SessionTable*)sessionMalloc64(pSession, nByte); if( !pTab ){ rc = SQLITE_NOMEM; }else{ @@ -205690,7 +207992,7 @@ static int sessionGenerateChangeset( int nNoop; /* Size of buffer after writing tbl header */ /* Check the table schema is still Ok. */ - rc = sessionTableInfo(db, pSession->zDb, zName, &nCol, 0, &azCol, &abPK); + rc = sessionTableInfo(0, db, pSession->zDb, zName, &nCol, 0,&azCol,&abPK); if( !rc && (pTab->nCol!=nCol || memcmp(abPK, pTab->abPK, nCol)) ){ rc = SQLITE_SCHEMA; } @@ -205865,6 +208167,13 @@ SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession){ return (ret==0); } +/* +** Return the amount of heap memory in use. +*/ +SQLITE_API sqlite3_int64 sqlite3session_memory_used(sqlite3_session *pSession){ + return pSession->nMalloc; +} + /* ** Do the work for either sqlite3changeset_start() or start_strm(). */ @@ -205874,7 +208183,8 @@ static int sessionChangesetStart( void *pIn, int nChangeset, /* Size of buffer pChangeset in bytes */ void *pChangeset, /* Pointer to buffer containing changeset */ - int bInvert /* True to invert changeset */ + int bInvert, /* True to invert changeset */ + int bSkipEmpty /* True to skip empty UPDATE changes */ ){ sqlite3_changeset_iter *pRet; /* Iterator to return */ int nByte; /* Number of bytes to allocate for iterator */ @@ -205895,6 +208205,7 @@ static int sessionChangesetStart( pRet->in.pIn = pIn; pRet->in.bEof = (xInput ? 0 : 1); pRet->bInvert = bInvert; + pRet->bSkipEmpty = bSkipEmpty; /* Populate the output variable and return success. 
*/ *pp = pRet; @@ -205909,7 +208220,7 @@ SQLITE_API int sqlite3changeset_start( int nChangeset, /* Size of buffer pChangeset in bytes */ void *pChangeset /* Pointer to buffer containing changeset */ ){ - return sessionChangesetStart(pp, 0, 0, nChangeset, pChangeset, 0); + return sessionChangesetStart(pp, 0, 0, nChangeset, pChangeset, 0, 0); } SQLITE_API int sqlite3changeset_start_v2( sqlite3_changeset_iter **pp, /* OUT: Changeset iterator handle */ @@ -205918,7 +208229,7 @@ SQLITE_API int sqlite3changeset_start_v2( int flags ){ int bInvert = !!(flags & SQLITE_CHANGESETSTART_INVERT); - return sessionChangesetStart(pp, 0, 0, nChangeset, pChangeset, bInvert); + return sessionChangesetStart(pp, 0, 0, nChangeset, pChangeset, bInvert, 0); } /* @@ -205929,7 +208240,7 @@ SQLITE_API int sqlite3changeset_start_strm( int (*xInput)(void *pIn, void *pData, int *pnData), void *pIn ){ - return sessionChangesetStart(pp, xInput, pIn, 0, 0, 0); + return sessionChangesetStart(pp, xInput, pIn, 0, 0, 0, 0); } SQLITE_API int sqlite3changeset_start_v2_strm( sqlite3_changeset_iter **pp, /* OUT: Changeset iterator handle */ @@ -205938,7 +208249,7 @@ SQLITE_API int sqlite3changeset_start_v2_strm( int flags ){ int bInvert = !!(flags & SQLITE_CHANGESETSTART_INVERT); - return sessionChangesetStart(pp, xInput, pIn, 0, 0, bInvert); + return sessionChangesetStart(pp, xInput, pIn, 0, 0, bInvert, 0); } /* @@ -206064,11 +208375,14 @@ static int sessionReadRecord( SessionInput *pIn, /* Input data */ int nCol, /* Number of values in record */ u8 *abPK, /* Array of primary key flags, or NULL */ - sqlite3_value **apOut /* Write values to this array */ + sqlite3_value **apOut, /* Write values to this array */ + int *pbEmpty ){ int i; /* Used to iterate through columns */ int rc = SQLITE_OK; + assert( pbEmpty==0 || *pbEmpty==0 ); + if( pbEmpty ) *pbEmpty = 1; for(i=0; i<nCol && rc==SQLITE_OK; i++){ int eType = 0; /* Type of value (SQLITE_NULL, TEXT etc.) */ if( abPK && abPK[i]==0 ) continue; @@ -206080,6 +208394,7 @@ static int sessionReadRecord( eType = pIn->aData[pIn->iNext++]; assert( apOut[i]==0 ); if( eType ){ + if( pbEmpty ) *pbEmpty = 0; apOut[i] = sqlite3ValueNew(0); if( !apOut[i] ) rc = SQLITE_NOMEM; } @@ -206259,31 +208574,27 @@ static int sessionChangesetReadTblhdr(sqlite3_changeset_iter *p){ } /* -** Advance the changeset iterator to the next change. +** Advance the changeset iterator to the next change. The differences between +** this function and sessionChangesetNext() are that ** -** If both paRec and pnRec are NULL, then this function works like the public -** API sqlite3changeset_next(). If SQLITE_ROW is returned, then the -** sqlite3changeset_new() and old() APIs may be used to query for values. +** * If pbEmpty is not NULL and the change is a no-op UPDATE (an UPDATE +** that modifies no columns), this function sets (*pbEmpty) to 1. ** -** Otherwise, if paRec and pnRec are not NULL, then a pointer to the change -** record is written to *paRec before returning and the number of bytes in -** the record to *pnRec. -** -** Either way, this function returns SQLITE_ROW if the iterator is -** successfully advanced to the next change in the changeset, an SQLite -** error code if an error occurs, or SQLITE_DONE if there are no further -** changes in the changeset. +** * If the iterator is configured to skip no-op UPDATEs, +** sessionChangesetNext() does that. This function does not. 
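At the API level the effect is that sqlite3changeset_apply_v2(), whose call into sessionChangesetStart() later in this diff now passes bSkipEmpty=1, silently skips UPDATE records that modify no columns. A sketch, assuming a changeset buffer pChangeset/nChangeset is at hand; the conflict handler is a deliberately trivial stand-in.

/* Trivial stand-in conflict handler: drop any conflicting change. */
static int xConflict(void *pCtx, int eConflict, sqlite3_changeset_iter *pIter){
  (void)pCtx; (void)eConflict; (void)pIter;
  return SQLITE_CHANGESET_OMIT;
}

static int apply_changeset(sqlite3 *db, int nChangeset, void *pChangeset){
  void *pRebase = 0;
  int nRebase = 0;
  int rc = sqlite3changeset_apply_v2(db, nChangeset, pChangeset,
               0 /* xFilter */, xConflict, 0 /* pCtx */,
               &pRebase, &nRebase, 0 /* flags */);
  sqlite3_free(pRebase);  /* rebase data is only collected on conflicts */
  return rc;
}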
*/ -static int sessionChangesetNext( +static int sessionChangesetNextOne( sqlite3_changeset_iter *p, /* Changeset iterator */ u8 **paRec, /* If non-NULL, store record pointer here */ int *pnRec, /* If non-NULL, store size of record here */ - int *pbNew /* If non-NULL, true if new table */ + int *pbNew, /* If non-NULL, true if new table */ + int *pbEmpty ){ int i; u8 op; assert( (paRec==0 && pnRec==0) || (paRec && pnRec) ); + assert( pbEmpty==0 || *pbEmpty==0 ); /* If the iterator is in the error-state, return immediately. */ if( p->rc!=SQLITE_OK ) return p->rc; @@ -206356,13 +208667,13 @@ static int sessionChangesetNext( /* If this is an UPDATE or DELETE, read the old.* record. */ if( p->op!=SQLITE_INSERT && (p->bPatchset==0 || p->op==SQLITE_DELETE) ){ u8 *abPK = p->bPatchset ? p->abPK : 0; - p->rc = sessionReadRecord(&p->in, p->nCol, abPK, apOld); + p->rc = sessionReadRecord(&p->in, p->nCol, abPK, apOld, 0); if( p->rc!=SQLITE_OK ) return p->rc; } /* If this is an INSERT or UPDATE, read the new.* record. */ if( p->op!=SQLITE_DELETE ){ - p->rc = sessionReadRecord(&p->in, p->nCol, 0, apNew); + p->rc = sessionReadRecord(&p->in, p->nCol, 0, apNew, pbEmpty); if( p->rc!=SQLITE_OK ) return p->rc; } @@ -206389,6 +208700,37 @@ static int sessionChangesetNext( return SQLITE_ROW; } +/* +** Advance the changeset iterator to the next change. +** +** If both paRec and pnRec are NULL, then this function works like the public +** API sqlite3changeset_next(). If SQLITE_ROW is returned, then the +** sqlite3changeset_new() and old() APIs may be used to query for values. +** +** Otherwise, if paRec and pnRec are not NULL, then a pointer to the change +** record is written to *paRec before returning and the number of bytes in +** the record to *pnRec. +** +** Either way, this function returns SQLITE_ROW if the iterator is +** successfully advanced to the next change in the changeset, an SQLite +** error code if an error occurs, or SQLITE_DONE if there are no further +** changes in the changeset. +*/ +static int sessionChangesetNext( + sqlite3_changeset_iter *p, /* Changeset iterator */ + u8 **paRec, /* If non-NULL, store record pointer here */ + int *pnRec, /* If non-NULL, store size of record here */ + int *pbNew /* If non-NULL, true if new table */ +){ + int bEmpty; + int rc; + do { + bEmpty = 0; + rc = sessionChangesetNextOne(p, paRec, pnRec, pbNew, &bEmpty); + }while( rc==SQLITE_ROW && p->bSkipEmpty && bEmpty); + return rc; +} + /* ** Advance an iterator created by sqlite3changeset_start() to the next ** change in the changeset. This function may return SQLITE_ROW, SQLITE_DONE @@ -206661,9 +209003,9 @@ static int sessionChangesetInvert( /* Read the old.* and new.* records for the update change. */ pInput->iNext += 2; - rc = sessionReadRecord(pInput, nCol, 0, &apVal[0]); + rc = sessionReadRecord(pInput, nCol, 0, &apVal[0], 0); if( rc==SQLITE_OK ){ - rc = sessionReadRecord(pInput, nCol, 0, &apVal[nCol]); + rc = sessionReadRecord(pInput, nCol, 0, &apVal[nCol], 0); } /* Write the new old.* record. 
Consists of the PK columns from the @@ -206764,16 +209106,25 @@ SQLITE_API int sqlite3changeset_invert_strm( return rc; } + +typedef struct SessionUpdate SessionUpdate; +struct SessionUpdate { + sqlite3_stmt *pStmt; + u32 *aMask; + SessionUpdate *pNext; +}; + typedef struct SessionApplyCtx SessionApplyCtx; struct SessionApplyCtx { sqlite3 *db; sqlite3_stmt *pDelete; /* DELETE statement */ - sqlite3_stmt *pUpdate; /* UPDATE statement */ sqlite3_stmt *pInsert; /* INSERT statement */ sqlite3_stmt *pSelect; /* SELECT statement */ int nCol; /* Size of azCol[] and abPK[] arrays */ const char **azCol; /* Array of column names */ u8 *abPK; /* Boolean array - true if column is in PK */ + u32 *aUpdateMask; /* Used by sessionUpdateFind */ + SessionUpdate *pUp; int bStat1; /* True if table is sqlite_stat1 */ int bDeferConstraints; /* True to defer constraints */ int bInvertConstraints; /* Invert when iterating constraints buffer */ @@ -206783,6 +209134,167 @@ struct SessionApplyCtx { u8 bRebase; /* True to collect rebase information */ }; +/* Number of prepared UPDATE statements to cache. */ +#define SESSION_UPDATE_CACHE_SZ 12 + +/* +** Find a prepared UPDATE statement suitable for the UPDATE step currently +** being visited by the iterator. The UPDATE is of the form: +** +** UPDATE tbl SET col = ?, col2 = ? WHERE pk1 IS ? AND pk2 IS ? +*/ +static int sessionUpdateFind( + sqlite3_changeset_iter *pIter, + SessionApplyCtx *p, + int bPatchset, + sqlite3_stmt **ppStmt +){ + int rc = SQLITE_OK; + SessionUpdate *pUp = 0; + int nCol = pIter->nCol; + int nU32 = (pIter->nCol+33)/32; + int ii; + + if( p->aUpdateMask==0 ){ + p->aUpdateMask = sqlite3_malloc(nU32*sizeof(u32)); + if( p->aUpdateMask==0 ){ + rc = SQLITE_NOMEM; + } + } + + if( rc==SQLITE_OK ){ + memset(p->aUpdateMask, 0, nU32*sizeof(u32)); + rc = SQLITE_CORRUPT; + for(ii=0; ii<pIter->nCol; ii++){ + if( sessionChangesetNew(pIter, ii) ){ + p->aUpdateMask[ii/32] |= (1<<(ii%32)); + rc = SQLITE_OK; + } + } + } + + if( rc==SQLITE_OK ){ + if( bPatchset ) p->aUpdateMask[nCol/32] |= (1<<(nCol%32)); + + if( p->pUp ){ + int nUp = 0; + SessionUpdate **pp = &p->pUp; + while( 1 ){ + nUp++; + if( 0==memcmp(p->aUpdateMask, (*pp)->aMask, nU32*sizeof(u32)) ){ + pUp = *pp; + *pp = pUp->pNext; + pUp->pNext = p->pUp; + p->pUp = pUp; + break; + } + + if( (*pp)->pNext ){ + pp = &(*pp)->pNext; + }else{ + if( nUp>=SESSION_UPDATE_CACHE_SZ ){ + sqlite3_finalize((*pp)->pStmt); + sqlite3_free(*pp); + *pp = 0; + } + break; + } + } + } + + if( pUp==0 ){ + int nByte = sizeof(SessionUpdate) * nU32*sizeof(u32); + int bStat1 = (sqlite3_stricmp(pIter->zTab, "sqlite_stat1")==0); + pUp = (SessionUpdate*)sqlite3_malloc(nByte); + if( pUp==0 ){ + rc = SQLITE_NOMEM; + }else{ + const char *zSep = ""; + SessionBuffer buf; + + memset(&buf, 0, sizeof(buf)); + pUp->aMask = (u32*)&pUp[1]; + memcpy(pUp->aMask, p->aUpdateMask, nU32*sizeof(u32)); + + sessionAppendStr(&buf, "UPDATE main.", &rc); + sessionAppendIdent(&buf, pIter->zTab, &rc); + sessionAppendStr(&buf, " SET ", &rc); + + /* Create the assignments part of the UPDATE */ + for(ii=0; ii<pIter->nCol; ii++){ + if( p->abPK[ii]==0 && sessionChangesetNew(pIter, ii) ){ + sessionAppendStr(&buf, zSep, &rc); + sessionAppendIdent(&buf, p->azCol[ii], &rc); + sessionAppendStr(&buf, " = ?", &rc); + sessionAppendInteger(&buf, ii*2+1, &rc); + zSep = ", "; + } + } + + /* Create the WHERE clause part of the UPDATE */ + zSep = ""; + sessionAppendStr(&buf, " WHERE ", &rc); + for(ii=0; ii<pIter->nCol; ii++){ + if( p->abPK[ii] || (bPatchset==0 && 
sessionChangesetOld(pIter, ii)) ){ + sessionAppendStr(&buf, zSep, &rc); + if( bStat1 && ii==1 ){ + assert( sqlite3_stricmp(p->azCol[ii], "idx")==0 ); + sessionAppendStr(&buf, + "idx IS CASE " + "WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL " + "ELSE ?4 END ", &rc + ); + }else{ + sessionAppendIdent(&buf, p->azCol[ii], &rc); + sessionAppendStr(&buf, " IS ?", &rc); + sessionAppendInteger(&buf, ii*2+2, &rc); + } + zSep = " AND "; + } + } + + if( rc==SQLITE_OK ){ + char *zSql = (char*)buf.aBuf; + rc = sqlite3_prepare_v2(p->db, zSql, buf.nBuf, &pUp->pStmt, 0); + } + + if( rc!=SQLITE_OK ){ + sqlite3_free(pUp); + pUp = 0; + }else{ + pUp->pNext = p->pUp; + p->pUp = pUp; + } + sqlite3_free(buf.aBuf); + } + } + } + + assert( (rc==SQLITE_OK)==(pUp!=0) ); + if( pUp ){ + *ppStmt = pUp->pStmt; + }else{ + *ppStmt = 0; + } + return rc; +} + +/* +** Free all cached UPDATE statements. +*/ +static void sessionUpdateFree(SessionApplyCtx *p){ + SessionUpdate *pUp; + SessionUpdate *pNext; + for(pUp=p->pUp; pUp; pUp=pNext){ + pNext = pUp->pNext; + sqlite3_finalize(pUp->pStmt); + sqlite3_free(pUp); + } + p->pUp = 0; + sqlite3_free(p->aUpdateMask); + p->aUpdateMask = 0; +} + /* ** Formulate a statement to DELETE a row from database db. Assuming a table ** structure like this: @@ -206852,103 +209364,6 @@ static int sessionDeleteRow( return rc; } -/* -** Formulate and prepare a statement to UPDATE a row from database db. -** Assuming a table structure like this: -** -** CREATE TABLE x(a, b, c, d, PRIMARY KEY(a, c)); -** -** The UPDATE statement looks like this: -** -** UPDATE x SET -** a = CASE WHEN ?2 THEN ?3 ELSE a END, -** b = CASE WHEN ?5 THEN ?6 ELSE b END, -** c = CASE WHEN ?8 THEN ?9 ELSE c END, -** d = CASE WHEN ?11 THEN ?12 ELSE d END -** WHERE a = ?1 AND c = ?7 AND (?13 OR -** (?5==0 OR b IS ?4) AND (?11==0 OR d IS ?10) AND -** ) -** -** For each column in the table, there are three variables to bind: -** -** ?(i*3+1) The old.* value of the column, if any. -** ?(i*3+2) A boolean flag indicating that the value is being modified. -** ?(i*3+3) The new.* value of the column, if any. -** -** Also, a boolean flag that, if set to true, causes the statement to update -** a row even if the non-PK values do not match. This is required if the -** conflict-handler is invoked with CHANGESET_DATA and returns -** CHANGESET_REPLACE. This is variable "?(nCol*3+1)". -** -** If successful, SQLITE_OK is returned and SessionApplyCtx.pUpdate is left -** pointing to the prepared version of the SQL statement. 
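The refactor visible in sessionUpdateFind() above keys each cached UPDATE on the set of columns the change actually modifies (up to SESSION_UPDATE_CACHE_SZ statements per table, kept move-to-front), which is what makes the one-size-fits-all sessionUpdateRow() below removable. Restating the cache key with the names the new code uses; aMask stands for sessionUpdateFind's p->aUpdateMask, and nCol, pIter and bPatchset are as in that function:

/* One bit per modified column, plus (for patchsets) one extra bit just
** past the last column, so changeset- and patchset-shaped statements
** never share a cache entry. */
int ii;
int nU32 = (nCol+33)/32;          /* u32 words holding nCol+1 bits */
memset(aMask, 0, nU32*sizeof(u32));
for(ii=0; ii<nCol; ii++){
  if( sessionChangesetNew(pIter, ii) ){    /* column ii has a new value */
    aMask[ii/32] |= ((u32)1 << (ii%32));
  }
}
if( bPatchset ) aMask[nCol/32] |= ((u32)1 << (nCol%32));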
-*/ -static int sessionUpdateRow( - sqlite3 *db, /* Database handle */ - const char *zTab, /* Table name */ - SessionApplyCtx *p /* Session changeset-apply context */ -){ - int rc = SQLITE_OK; - int i; - const char *zSep = ""; - SessionBuffer buf = {0, 0, 0}; - - /* Append "UPDATE tbl SET " */ - sessionAppendStr(&buf, "UPDATE main.", &rc); - sessionAppendIdent(&buf, zTab, &rc); - sessionAppendStr(&buf, " SET ", &rc); - - /* Append the assignments */ - for(i=0; i<p->nCol; i++){ - sessionAppendStr(&buf, zSep, &rc); - sessionAppendIdent(&buf, p->azCol[i], &rc); - sessionAppendStr(&buf, " = CASE WHEN ?", &rc); - sessionAppendInteger(&buf, i*3+2, &rc); - sessionAppendStr(&buf, " THEN ?", &rc); - sessionAppendInteger(&buf, i*3+3, &rc); - sessionAppendStr(&buf, " ELSE ", &rc); - sessionAppendIdent(&buf, p->azCol[i], &rc); - sessionAppendStr(&buf, " END", &rc); - zSep = ", "; - } - - /* Append the PK part of the WHERE clause */ - sessionAppendStr(&buf, " WHERE ", &rc); - for(i=0; i<p->nCol; i++){ - if( p->abPK[i] ){ - sessionAppendIdent(&buf, p->azCol[i], &rc); - sessionAppendStr(&buf, " = ?", &rc); - sessionAppendInteger(&buf, i*3+1, &rc); - sessionAppendStr(&buf, " AND ", &rc); - } - } - - /* Append the non-PK part of the WHERE clause */ - sessionAppendStr(&buf, " (?", &rc); - sessionAppendInteger(&buf, p->nCol*3+1, &rc); - sessionAppendStr(&buf, " OR 1", &rc); - for(i=0; i<p->nCol; i++){ - if( !p->abPK[i] ){ - sessionAppendStr(&buf, " AND (?", &rc); - sessionAppendInteger(&buf, i*3+2, &rc); - sessionAppendStr(&buf, "=0 OR ", &rc); - sessionAppendIdent(&buf, p->azCol[i], &rc); - sessionAppendStr(&buf, " IS ?", &rc); - sessionAppendInteger(&buf, i*3+1, &rc); - sessionAppendStr(&buf, ")", &rc); - } - } - sessionAppendStr(&buf, ")", &rc); - - if( rc==SQLITE_OK ){ - rc = sqlite3_prepare_v2(db, (char *)buf.aBuf, buf.nBuf, &p->pUpdate, 0); - } - sqlite3_free(buf.aBuf); - - return rc; -} - - /* ** Formulate and prepare an SQL statement to query table zTab by primary ** key. Assuming the following table structure: @@ -207029,17 +209444,6 @@ static int sessionStat1Sql(sqlite3 *db, SessionApplyCtx *p){ "?3)" ); } - if( rc==SQLITE_OK ){ - rc = sessionPrepare(db, &p->pUpdate, - "UPDATE main.sqlite_stat1 SET " - "tbl = CASE WHEN ?2 THEN ?3 ELSE tbl END, " - "idx = CASE WHEN ?5 THEN ?6 ELSE idx END, " - "stat = CASE WHEN ?8 THEN ?9 ELSE stat END " - "WHERE tbl=?1 AND idx IS " - "CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END " - "AND (?10 OR ?8=0 OR stat IS ?7)" - ); - } if( rc==SQLITE_OK ){ rc = sessionPrepare(db, &p->pDelete, "DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS " @@ -207356,7 +209760,7 @@ static int sessionApplyOneOp( int nCol; int rc = SQLITE_OK; - assert( p->pDelete && p->pUpdate && p->pInsert && p->pSelect ); + assert( p->pDelete && p->pInsert && p->pSelect ); assert( p->azCol && p->abPK ); assert( !pbReplace || *pbReplace==0 ); @@ -207396,29 +209800,28 @@ static int sessionApplyOneOp( }else if( op==SQLITE_UPDATE ){ int i; + sqlite3_stmt *pUp = 0; + int bPatchset = (pbRetry==0 || pIter->bPatchset); + + rc = sessionUpdateFind(pIter, p, bPatchset, &pUp); /* Bind values to the UPDATE statement. 
*/ for(i=0; rc==SQLITE_OK && i<nCol; i++){ sqlite3_value *pOld = sessionChangesetOld(pIter, i); sqlite3_value *pNew = sessionChangesetNew(pIter, i); - - sqlite3_bind_int(p->pUpdate, i*3+2, !!pNew); - if( pOld ){ - rc = sessionBindValue(p->pUpdate, i*3+1, pOld); + if( p->abPK[i] || (bPatchset==0 && pOld) ){ + rc = sessionBindValue(pUp, i*2+2, pOld); } if( rc==SQLITE_OK && pNew ){ - rc = sessionBindValue(p->pUpdate, i*3+3, pNew); + rc = sessionBindValue(pUp, i*2+1, pNew); } } - if( rc==SQLITE_OK ){ - sqlite3_bind_int(p->pUpdate, nCol*3+1, pbRetry==0 || pIter->bPatchset); - } if( rc!=SQLITE_OK ) return rc; /* Attempt the UPDATE. In the case of a NOTFOUND or DATA conflict, ** the result will be SQLITE_OK with 0 rows modified. */ - sqlite3_step(p->pUpdate); - rc = sqlite3_reset(p->pUpdate); + sqlite3_step(pUp); + rc = sqlite3_reset(pUp); if( rc==SQLITE_OK && sqlite3_changes(p->db)==0 ){ /* A NOTFOUND or DATA error. Search the table to see if it contains @@ -207550,7 +209953,7 @@ static int sessionRetryConstraints( memset(&pApply->constraints, 0, sizeof(SessionBuffer)); rc = sessionChangesetStart( - &pIter2, 0, 0, cons.nBuf, cons.aBuf, pApply->bInvertConstraints + &pIter2, 0, 0, cons.nBuf, cons.aBuf, pApply->bInvertConstraints, 1 ); if( rc==SQLITE_OK ){ size_t nByte = 2*pApply->nCol*sizeof(sqlite3_value*); @@ -207641,14 +210044,13 @@ static int sessionChangesetApply( ); if( rc!=SQLITE_OK ) break; + sessionUpdateFree(&sApply); sqlite3_free((char*)sApply.azCol); /* cast works around VC++ bug */ sqlite3_finalize(sApply.pDelete); - sqlite3_finalize(sApply.pUpdate); sqlite3_finalize(sApply.pInsert); sqlite3_finalize(sApply.pSelect); sApply.db = db; sApply.pDelete = 0; - sApply.pUpdate = 0; sApply.pInsert = 0; sApply.pSelect = 0; sApply.nCol = 0; @@ -207676,7 +210078,7 @@ static int sessionChangesetApply( int i; sqlite3changeset_pk(pIter, &abPK, 0); - rc = sessionTableInfo( + rc = sessionTableInfo(0, db, "main", zNew, &sApply.nCol, &zTab, &sApply.azCol, &sApply.abPK ); if( rc!=SQLITE_OK ) break; @@ -207712,11 +210114,10 @@ static int sessionChangesetApply( } sApply.bStat1 = 1; }else{ - if((rc = sessionSelectRow(db, zTab, &sApply)) - || (rc = sessionUpdateRow(db, zTab, &sApply)) - || (rc = sessionDeleteRow(db, zTab, &sApply)) - || (rc = sessionInsertRow(db, zTab, &sApply)) - ){ + if( (rc = sessionSelectRow(db, zTab, &sApply)) + || (rc = sessionDeleteRow(db, zTab, &sApply)) + || (rc = sessionInsertRow(db, zTab, &sApply)) + ){ break; } sApply.bStat1 = 0; @@ -207775,9 +210176,9 @@ static int sessionChangesetApply( *pnRebase = sApply.rebase.nBuf; sApply.rebase.aBuf = 0; } + sessionUpdateFree(&sApply); sqlite3_finalize(sApply.pInsert); sqlite3_finalize(sApply.pDelete); - sqlite3_finalize(sApply.pUpdate); sqlite3_finalize(sApply.pSelect); sqlite3_free((char*)sApply.azCol); /* cast works around VC++ bug */ sqlite3_free((char*)sApply.constraints.aBuf); @@ -207808,8 +210209,8 @@ SQLITE_API int sqlite3changeset_apply_v2( int flags ){ sqlite3_changeset_iter *pIter; /* Iterator to skip through changeset */ - int bInverse = !!(flags & SQLITE_CHANGESETAPPLY_INVERT); - int rc = sessionChangesetStart(&pIter, 0, 0, nChangeset, pChangeset,bInverse); + int bInv = !!(flags & SQLITE_CHANGESETAPPLY_INVERT); + int rc = sessionChangesetStart(&pIter, 0, 0, nChangeset, pChangeset, bInv, 1); if( rc==SQLITE_OK ){ rc = sessionChangesetApply( db, pIter, xFilter, xConflict, pCtx, ppRebase, pnRebase, flags @@ -207867,7 +210268,7 @@ SQLITE_API int sqlite3changeset_apply_v2_strm( ){ sqlite3_changeset_iter *pIter; /* Iterator to skip 
through changeset */ int bInverse = !!(flags & SQLITE_CHANGESETAPPLY_INVERT); - int rc = sessionChangesetStart(&pIter, xInput, pIn, 0, 0, bInverse); + int rc = sessionChangesetStart(&pIter, xInput, pIn, 0, 0, bInverse, 1); if( rc==SQLITE_OK ){ rc = sessionChangesetApply( db, pIter, xFilter, xConflict, pCtx, ppRebase, pnRebase, flags @@ -208155,7 +210556,7 @@ static int sessionChangesetToHash( } } - if( sessionGrowHash(pIter->bPatchset, pTab) ){ + if( sessionGrowHash(0, pIter->bPatchset, pTab) ){ rc = SQLITE_NOMEM; break; } @@ -208341,7 +210742,7 @@ SQLITE_API int sqlite3changegroup_output_strm( */ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup *pGrp){ if( pGrp ){ - sessionDeleteTable(pGrp->pList); + sessionDeleteTable(0, pGrp->pList); sqlite3_free(pGrp); } } @@ -208487,7 +210888,7 @@ static void sessionAppendPartialUpdate( int n1 = sessionSerialLen(a1); int n2 = sessionSerialLen(a2); if( pIter->abPK[i] || a2[0]==0 ){ - if( !pIter->abPK[i] ) bData = 1; + if( !pIter->abPK[i] && a1[0] ) bData = 1; memcpy(pOut, a1, n1); pOut += n1; }else if( a2[0]!=0xFF ){ @@ -208742,7 +211143,7 @@ SQLITE_API int sqlite3rebaser_rebase_strm( */ SQLITE_API void sqlite3rebaser_delete(sqlite3_rebaser *p){ if( p ){ - sessionDeleteTable(p->grp.pList); + sessionDeleteTable(0, p->grp.pList); sqlite3_free(p); } } @@ -211204,55 +213605,6 @@ static fts5YYACTIONTYPE fts5yy_reduce( (void)fts5yyLookahead; (void)fts5yyLookaheadToken; fts5yymsp = fts5yypParser->fts5yytos; - assert( fts5yyruleno<(int)(sizeof(fts5yyRuleName)/sizeof(fts5yyRuleName[0])) ); -#ifndef NDEBUG - if( fts5yyTraceFILE ){ - fts5yysize = fts5yyRuleInfoNRhs[fts5yyruleno]; - if( fts5yysize ){ - fprintf(fts5yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n", - fts5yyTracePrompt, - fts5yyruleno, fts5yyRuleName[fts5yyruleno], - fts5yyruleno<fts5YYNRULE_WITH_ACTION ? "" : " without external action", - fts5yymsp[fts5yysize].stateno); - }else{ - fprintf(fts5yyTraceFILE, "%sReduce %d [%s]%s.\n", - fts5yyTracePrompt, fts5yyruleno, fts5yyRuleName[fts5yyruleno], - fts5yyruleno<fts5YYNRULE_WITH_ACTION ? "" : " without external action"); - } - } -#endif /* NDEBUG */ - - /* Check that the stack is large enough to grow by a single entry - ** if the RHS of the rule is empty. This ensures that there is room - ** enough on the stack to push the LHS value */ - if( fts5yyRuleInfoNRhs[fts5yyruleno]==0 ){ -#ifdef fts5YYTRACKMAXSTACKDEPTH - if( (int)(fts5yypParser->fts5yytos - fts5yypParser->fts5yystack)>fts5yypParser->fts5yyhwm ){ - fts5yypParser->fts5yyhwm++; - assert( fts5yypParser->fts5yyhwm == (int)(fts5yypParser->fts5yytos - fts5yypParser->fts5yystack)); - } -#endif -#if fts5YYSTACKDEPTH>0 - if( fts5yypParser->fts5yytos>=fts5yypParser->fts5yystackEnd ){ - fts5yyStackOverflow(fts5yypParser); - /* The call to fts5yyStackOverflow() above pops the stack until it is - ** empty, causing the main parser loop to exit. So the return value - ** is never used and does not matter. */ - return 0; - } -#else - if( fts5yypParser->fts5yytos>=&fts5yypParser->fts5yystack[fts5yypParser->fts5yystksz-1] ){ - if( fts5yyGrowStack(fts5yypParser) ){ - fts5yyStackOverflow(fts5yypParser); - /* The call to fts5yyStackOverflow() above pops the stack until it is - ** empty, causing the main parser loop to exit. So the return value - ** is never used and does not matter. */ - return 0; - } - fts5yymsp = fts5yypParser->fts5yytos; - } -#endif - } switch( fts5yyruleno ){ /* Beginning here are the reduction cases. 
A typical example @@ -211555,12 +213907,56 @@ static void sqlite3Fts5Parser( } #endif - do{ + while(1){ /* Exit by "break" */ + assert( fts5yypParser->fts5yytos>=fts5yypParser->fts5yystack ); assert( fts5yyact==fts5yypParser->fts5yytos->stateno ); fts5yyact = fts5yy_find_shift_action((fts5YYCODETYPE)fts5yymajor,fts5yyact); if( fts5yyact >= fts5YY_MIN_REDUCE ){ - fts5yyact = fts5yy_reduce(fts5yypParser,fts5yyact-fts5YY_MIN_REDUCE,fts5yymajor, - fts5yyminor sqlite3Fts5ParserCTX_PARAM); + unsigned int fts5yyruleno = fts5yyact - fts5YY_MIN_REDUCE; /* Reduce by this rule */ + assert( fts5yyruleno<(int)(sizeof(fts5yyRuleName)/sizeof(fts5yyRuleName[0])) ); +#ifndef NDEBUG + if( fts5yyTraceFILE ){ + int fts5yysize = fts5yyRuleInfoNRhs[fts5yyruleno]; + if( fts5yysize ){ + fprintf(fts5yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n", + fts5yyTracePrompt, + fts5yyruleno, fts5yyRuleName[fts5yyruleno], + fts5yyruleno<fts5YYNRULE_WITH_ACTION ? "" : " without external action", + fts5yypParser->fts5yytos[fts5yysize].stateno); + }else{ + fprintf(fts5yyTraceFILE, "%sReduce %d [%s]%s.\n", + fts5yyTracePrompt, fts5yyruleno, fts5yyRuleName[fts5yyruleno], + fts5yyruleno<fts5YYNRULE_WITH_ACTION ? "" : " without external action"); + } + } +#endif /* NDEBUG */ + + /* Check that the stack is large enough to grow by a single entry + ** if the RHS of the rule is empty. This ensures that there is room + ** enough on the stack to push the LHS value */ + if( fts5yyRuleInfoNRhs[fts5yyruleno]==0 ){ +#ifdef fts5YYTRACKMAXSTACKDEPTH + if( (int)(fts5yypParser->fts5yytos - fts5yypParser->fts5yystack)>fts5yypParser->fts5yyhwm ){ + fts5yypParser->fts5yyhwm++; + assert( fts5yypParser->fts5yyhwm == + (int)(fts5yypParser->fts5yytos - fts5yypParser->fts5yystack)); + } +#endif +#if fts5YYSTACKDEPTH>0 + if( fts5yypParser->fts5yytos>=fts5yypParser->fts5yystackEnd ){ + fts5yyStackOverflow(fts5yypParser); + break; + } +#else + if( fts5yypParser->fts5yytos>=&fts5yypParser->fts5yystack[fts5yypParser->fts5yystksz-1] ){ + if( fts5yyGrowStack(fts5yypParser) ){ + fts5yyStackOverflow(fts5yypParser); + break; + } + } +#endif + } + fts5yyact = fts5yy_reduce(fts5yypParser,fts5yyruleno,fts5yymajor,fts5yyminor sqlite3Fts5ParserCTX_PARAM); }else if( fts5yyact <= fts5YY_MAX_SHIFTREDUCE ){ fts5yy_shift(fts5yypParser,fts5yyact,(fts5YYCODETYPE)fts5yymajor,fts5yyminor); #ifndef fts5YYNOERRORRECOVERY @@ -211673,7 +214069,7 @@ static void sqlite3Fts5Parser( break; #endif } - }while( fts5yypParser->fts5yytos>fts5yypParser->fts5yystack ); + } #ifndef NDEBUG if( fts5yyTraceFILE ){ fts5yyStackEntry *i; @@ -215285,8 +217681,8 @@ static int sqlite3Fts5ExprFirst(Fts5Expr *p, Fts5Index *pIdx, i64 iFirst, int bD } /* If the iterator is not at a real match, skip forward until it is. */ - while( pRoot->bNomatch ){ - assert( pRoot->bEof==0 && rc==SQLITE_OK ); + while( pRoot->bNomatch && rc==SQLITE_OK ){ + assert( pRoot->bEof==0 ); rc = fts5ExprNodeNext(p, pRoot, 0, 0); } return rc; @@ -219471,14 +221867,10 @@ static void fts5SegIterNext( }else{ /* The following could be done by calling fts5SegIterLoadNPos(). But ** this block is particularly performance critical, so equivalent - ** code is inlined. - ** - ** Later: Switched back to fts5SegIterLoadNPos() because it supports - ** detail=none mode. Not ideal. - */ + ** code is inlined. 
*/ int nSz; assert( p->rc==SQLITE_OK ); - assert( pIter->iLeafOffset<=pIter->pLeaf->nn ); + assert_nc( pIter->iLeafOffset<=pIter->pLeaf->nn ); fts5FastGetVarint32(pIter->pLeaf->p, pIter->iLeafOffset, nSz); pIter->bDel = (nSz & 0x0001); pIter->nPos = nSz>>1; @@ -220470,7 +222862,7 @@ static void fts5ChunkIterate( int pgno = pSeg->iLeafPgno; int pgnoSave = 0; - /* This function does notmwork with detail=none databases. */ + /* This function does not work with detail=none databases. */ assert( p->pConfig->eDetail!=FTS5_DETAIL_NONE ); if( (pSeg->flags & FTS5_SEGITER_REVERSE)==0 ){ @@ -220483,6 +222875,9 @@ static void fts5ChunkIterate( fts5DataRelease(pData); if( nRem<=0 ){ break; + }else if( pSeg->pSeg==0 ){ + p->rc = FTS5_CORRUPT; + return; }else{ pgno++; pData = fts5LeafRead(p, FTS5_SEGMENT_ROWID(pSeg->pSeg->iSegid, pgno)); @@ -220534,66 +222929,72 @@ static void fts5SegiterPoslist( } /* -** IN/OUT parameter (*pa) points to a position list n bytes in size. If -** the position list contains entries for column iCol, then (*pa) is set -** to point to the sub-position-list for that column and the number of -** bytes in it returned. Or, if the argument position list does not -** contain any entries for column iCol, return 0. +** Parameter pPos points to a buffer containing a position list, size nPos. +** This function filters it according to pColset (which must be non-NULL) +** and sets pIter->base.pData/nData to point to the new position list. +** If memory is required for the new position list, use buffer pIter->poslist. +** Or, if the new position list is a contiguous subset of the input, set +** pIter->base.pData/nData to point directly to it. +** +** This function is a no-op if *pRc is other than SQLITE_OK when it is +** called. If an OOM error is encountered, *pRc is set to SQLITE_NOMEM +** before returning. */ -static int fts5IndexExtractCol( - const u8 **pa, /* IN/OUT: Pointer to poslist */ - int n, /* IN: Size of poslist in bytes */ - int iCol /* Column to extract from poslist */ -){ - int iCurrent = 0; /* Anything before the first 0x01 is col 0 */ - const u8 *p = *pa; - const u8 *pEnd = &p[n]; /* One byte past end of position list */ - - while( iCol>iCurrent ){ - /* Advance pointer p until it points to pEnd or an 0x01 byte that is - ** not part of a varint. Note that it is not possible for a negative - ** or extremely large varint to occur within an uncorrupted position - ** list. So the last byte of each varint may be assumed to have a clear - ** 0x80 bit. 
*/ - while( *p!=0x01 ){ - while( *p++ & 0x80 ); - if( p>=pEnd ) return 0; - } - *pa = p++; - iCurrent = *p++; - if( iCurrent & 0x80 ){ - p--; - p += fts5GetVarint32(p, iCurrent); - } - } - if( iCol!=iCurrent ) return 0; - - /* Advance pointer p until it points to pEnd or an 0x01 byte that is - ** not part of a varint */ - while( p<pEnd && *p!=0x01 ){ - while( *p++ & 0x80 ); - } - - return p - (*pa); -} - static void fts5IndexExtractColset( int *pRc, Fts5Colset *pColset, /* Colset to filter on */ const u8 *pPos, int nPos, /* Position list */ - Fts5Buffer *pBuf /* Output buffer */ + Fts5Iter *pIter ){ if( *pRc==SQLITE_OK ){ - int i; - fts5BufferZero(pBuf); - for(i=0; i<pColset->nCol; i++){ - const u8 *pSub = pPos; - int nSub = fts5IndexExtractCol(&pSub, nPos, pColset->aiCol[i]); - if( nSub ){ - fts5BufferAppendBlob(pRc, pBuf, nSub, pSub); + const u8 *p = pPos; + const u8 *aCopy = p; + const u8 *pEnd = &p[nPos]; /* One byte past end of position list */ + int i = 0; + int iCurrent = 0; + + if( pColset->nCol>1 && sqlite3Fts5BufferSize(pRc, &pIter->poslist, nPos) ){ + return; + } + + while( 1 ){ + while( pColset->aiCol[i]<iCurrent ){ + i++; + if( i==pColset->nCol ){ + pIter->base.pData = pIter->poslist.p; + pIter->base.nData = pIter->poslist.n; + return; + } + } + + /* Advance pointer p until it points to pEnd or an 0x01 byte that is + ** not part of a varint */ + while( p<pEnd && *p!=0x01 ){ + while( *p++ & 0x80 ); + } + + if( pColset->aiCol[i]==iCurrent ){ + if( pColset->nCol==1 ){ + pIter->base.pData = aCopy; + pIter->base.nData = p-aCopy; + return; + } + fts5BufferSafeAppendBlob(&pIter->poslist, aCopy, p-aCopy); + } + if( p==pEnd ){ + pIter->base.pData = pIter->poslist.p; + pIter->base.nData = pIter->poslist.n; + return; + } + aCopy = p++; + iCurrent = *p++; + if( iCurrent & 0x80 ){ + p--; + p += fts5GetVarint32(p, iCurrent); } } } + } /* @@ -220713,16 +223114,9 @@ static void fts5IterSetOutputs_Full(Fts5Iter *pIter, Fts5SegIter *pSeg){ /* All data is stored on the current page. Populate the output ** variables to point into the body of the page object. */ const u8 *a = &pSeg->pLeaf->p[pSeg->iLeafOffset]; - if( pColset->nCol==1 ){ - pIter->base.nData = fts5IndexExtractCol(&a, pSeg->nPos,pColset->aiCol[0]); - pIter->base.pData = a; - }else{ - int *pRc = &pIter->pIndex->rc; - fts5BufferZero(&pIter->poslist); - fts5IndexExtractColset(pRc, pColset, a, pSeg->nPos, &pIter->poslist); - pIter->base.pData = pIter->poslist.p; - pIter->base.nData = pIter->poslist.n; - } + int *pRc = &pIter->pIndex->rc; + fts5BufferZero(&pIter->poslist); + fts5IndexExtractColset(pRc, pColset, a, pSeg->nPos, pIter); }else{ /* The data is distributed over two or more pages. 
Copy it into the ** Fts5Iter.poslist buffer and then set the output pointer to point @@ -222205,7 +224599,7 @@ static void fts5AppendPoslist( static void fts5DoclistIterNext(Fts5DoclistIter *pIter){ u8 *p = pIter->aPoslist + pIter->nSize + pIter->nPoslist; - assert( pIter->aPoslist ); + assert( pIter->aPoslist || (p==0 && pIter->aPoslist==0) ); if( p>=pIter->aEof ){ pIter->aPoslist = 0; }else{ @@ -222225,6 +224619,9 @@ static void fts5DoclistIterNext(Fts5DoclistIter *pIter){ } pIter->aPoslist = p; + if( &pIter->aPoslist[pIter->nPoslist]>pIter->aEof ){ + pIter->aPoslist = 0; + } } } @@ -222233,9 +224630,11 @@ static void fts5DoclistIterInit( Fts5DoclistIter *pIter ){ memset(pIter, 0, sizeof(*pIter)); - pIter->aPoslist = pBuf->p; - pIter->aEof = &pBuf->p[pBuf->n]; - fts5DoclistIterNext(pIter); + if( pBuf->n>0 ){ + pIter->aPoslist = pBuf->p; + pIter->aEof = &pBuf->p[pBuf->n]; + fts5DoclistIterNext(pIter); + } } #if 0 @@ -222289,16 +224688,20 @@ static void fts5NextRowid(Fts5Buffer *pBuf, int *piOff, i64 *piRowid){ static void fts5MergeRowidLists( Fts5Index *p, /* FTS5 backend object */ Fts5Buffer *p1, /* First list to merge */ - Fts5Buffer *p2 /* Second list to merge */ + int nBuf, /* Number of entries in apBuf[] */ + Fts5Buffer *aBuf /* Array of other lists to merge into p1 */ ){ int i1 = 0; int i2 = 0; i64 iRowid1 = 0; i64 iRowid2 = 0; i64 iOut = 0; - + Fts5Buffer *p2 = &aBuf[0]; Fts5Buffer out; + + (void)nBuf; memset(&out, 0, sizeof(out)); + assert( nBuf==1 ); sqlite3Fts5BufferSize(&p->rc, &out, p1->n + p2->n); if( p->rc ) return; @@ -222325,180 +224728,213 @@ static void fts5MergeRowidLists( fts5BufferFree(&out); } +typedef struct PrefixMerger PrefixMerger; +struct PrefixMerger { + Fts5DoclistIter iter; /* Doclist iterator */ + i64 iPos; /* For iterating through a position list */ + int iOff; + u8 *aPos; + PrefixMerger *pNext; /* Next in docid/poslist order */ +}; + +static void fts5PrefixMergerInsertByRowid( + PrefixMerger **ppHead, + PrefixMerger *p +){ + if( p->iter.aPoslist ){ + PrefixMerger **pp = ppHead; + while( *pp && p->iter.iRowid>(*pp)->iter.iRowid ){ + pp = &(*pp)->pNext; + } + p->pNext = *pp; + *pp = p; + } +} + +static void fts5PrefixMergerInsertByPosition( + PrefixMerger **ppHead, + PrefixMerger *p +){ + if( p->iPos>=0 ){ + PrefixMerger **pp = ppHead; + while( *pp && p->iPos>(*pp)->iPos ){ + pp = &(*pp)->pNext; + } + p->pNext = *pp; + *pp = p; + } +} + + /* -** Buffers p1 and p2 contain doclists. This function merges the content -** of the two doclists together and sets buffer p1 to the result before -** returning. -** -** If an error occurs, an error code is left in p->rc. If an error has -** already occurred, this function is a no-op. +** Array aBuf[] contains nBuf doclists. These are all merged in with the +** doclist in buffer p1. 
*/ static void fts5MergePrefixLists( Fts5Index *p, /* FTS5 backend object */ Fts5Buffer *p1, /* First list to merge */ - Fts5Buffer *p2 /* Second list to merge */ + int nBuf, /* Number of buffers in array aBuf[] */ + Fts5Buffer *aBuf /* Other lists to merge in */ ){ - if( p2->n ){ - i64 iLastRowid = 0; - Fts5DoclistIter i1; - Fts5DoclistIter i2; - Fts5Buffer out = {0, 0, 0}; - Fts5Buffer tmp = {0, 0, 0}; +#define fts5PrefixMergerNextPosition(p) \ + sqlite3Fts5PoslistNext64((p)->aPos,(p)->iter.nPoslist,&(p)->iOff,&(p)->iPos); +#define FTS5_MERGE_NLIST 16 + PrefixMerger aMerger[FTS5_MERGE_NLIST]; + PrefixMerger *pHead = 0; + int i; + int nOut = 0; + Fts5Buffer out = {0, 0, 0}; + Fts5Buffer tmp = {0, 0, 0}; + i64 iLastRowid = 0; - /* The maximum size of the output is equal to the sum of the two - ** input sizes + 1 varint (9 bytes). The extra varint is because if the - ** first rowid in one input is a large negative number, and the first in - ** the other a non-negative number, the delta for the non-negative - ** number will be larger on disk than the literal integer value - ** was. - ** - ** Or, if the input position-lists are corrupt, then the output might - ** include up to 2 extra 10-byte positions created by interpreting -1 - ** (the value PoslistNext64() uses for EOF) as a position and appending - ** it to the output. This can happen at most once for each input - ** position-list, hence two 10 byte paddings. */ - if( sqlite3Fts5BufferSize(&p->rc, &out, p1->n + p2->n + 9+10+10) ) return; - fts5DoclistIterInit(p1, &i1); - fts5DoclistIterInit(p2, &i2); - - while( 1 ){ - if( i1.iRowid<i2.iRowid ){ - /* Copy entry from i1 */ - fts5MergeAppendDocid(&out, iLastRowid, i1.iRowid); - fts5BufferSafeAppendBlob(&out, i1.aPoslist, i1.nPoslist+i1.nSize); - fts5DoclistIterNext(&i1); - if( i1.aPoslist==0 ) break; - assert( out.n<=((i1.aPoslist-p1->p) + (i2.aPoslist-p2->p)+9+10+10) ); - } - else if( i2.iRowid!=i1.iRowid ){ - /* Copy entry from i2 */ - fts5MergeAppendDocid(&out, iLastRowid, i2.iRowid); - fts5BufferSafeAppendBlob(&out, i2.aPoslist, i2.nPoslist+i2.nSize); - fts5DoclistIterNext(&i2); - if( i2.aPoslist==0 ) break; - assert( out.n<=((i1.aPoslist-p1->p) + (i2.aPoslist-p2->p)+9+10+10) ); - } - else{ - /* Merge the two position lists. */ - i64 iPos1 = 0; - i64 iPos2 = 0; - int iOff1 = 0; - int iOff2 = 0; - u8 *a1 = &i1.aPoslist[i1.nSize]; - u8 *a2 = &i2.aPoslist[i2.nSize]; - int nCopy; - u8 *aCopy; - - i64 iPrev = 0; - Fts5PoslistWriter writer; - memset(&writer, 0, sizeof(writer)); - - /* See the earlier comment in this function for an explanation of why - ** corrupt input position lists might cause the output to consume - ** at most 20 bytes of unexpected space. 
*/ - fts5MergeAppendDocid(&out, iLastRowid, i2.iRowid); - fts5BufferZero(&tmp); - sqlite3Fts5BufferSize(&p->rc, &tmp, - i1.nPoslist + i2.nPoslist + 10 + 10 + FTS5_DATA_ZERO_PADDING - ); - if( p->rc ) break; - - sqlite3Fts5PoslistNext64(a1, i1.nPoslist, &iOff1, &iPos1); - sqlite3Fts5PoslistNext64(a2, i2.nPoslist, &iOff2, &iPos2); - assert_nc( iPos1>=0 && iPos2>=0 ); - - if( iPos1<iPos2 ){ - sqlite3Fts5PoslistSafeAppend(&tmp, &iPrev, iPos1); - sqlite3Fts5PoslistNext64(a1, i1.nPoslist, &iOff1, &iPos1); - }else{ - sqlite3Fts5PoslistSafeAppend(&tmp, &iPrev, iPos2); - sqlite3Fts5PoslistNext64(a2, i2.nPoslist, &iOff2, &iPos2); - } - if( iPos1>=0 && iPos2>=0 ){ - while( 1 ){ - if( iPos1<iPos2 ){ - if( iPos1!=iPrev ){ - sqlite3Fts5PoslistSafeAppend(&tmp, &iPrev, iPos1); - } - sqlite3Fts5PoslistNext64(a1, i1.nPoslist, &iOff1, &iPos1); - if( iPos1<0 ) break; - }else{ - assert_nc( iPos2!=iPrev ); - sqlite3Fts5PoslistSafeAppend(&tmp, &iPrev, iPos2); - sqlite3Fts5PoslistNext64(a2, i2.nPoslist, &iOff2, &iPos2); - if( iPos2<0 ) break; - } - } - } - - if( iPos1>=0 ){ - if( iPos1!=iPrev ){ - sqlite3Fts5PoslistSafeAppend(&tmp, &iPrev, iPos1); - } - aCopy = &a1[iOff1]; - nCopy = i1.nPoslist - iOff1; - }else{ - assert_nc( iPos2>=0 && iPos2!=iPrev ); - sqlite3Fts5PoslistSafeAppend(&tmp, &iPrev, iPos2); - aCopy = &a2[iOff2]; - nCopy = i2.nPoslist - iOff2; - } - if( nCopy>0 ){ - fts5BufferSafeAppendBlob(&tmp, aCopy, nCopy); - } - - /* WRITEPOSLISTSIZE */ - assert_nc( tmp.n<=i1.nPoslist+i2.nPoslist ); - assert( tmp.n<=i1.nPoslist+i2.nPoslist+10+10 ); - if( tmp.n>i1.nPoslist+i2.nPoslist ){ - if( p->rc==SQLITE_OK ) p->rc = FTS5_CORRUPT; - break; - } - fts5BufferSafeAppendVarint(&out, tmp.n * 2); - fts5BufferSafeAppendBlob(&out, tmp.p, tmp.n); - fts5DoclistIterNext(&i1); - fts5DoclistIterNext(&i2); - assert_nc( out.n<=(p1->n+p2->n+9) ); - if( i1.aPoslist==0 || i2.aPoslist==0 ) break; - assert( out.n<=((i1.aPoslist-p1->p) + (i2.aPoslist-p2->p)+9+10+10) ); - } - } - - if( i1.aPoslist ){ - fts5MergeAppendDocid(&out, iLastRowid, i1.iRowid); - fts5BufferSafeAppendBlob(&out, i1.aPoslist, i1.aEof - i1.aPoslist); - } - else if( i2.aPoslist ){ - fts5MergeAppendDocid(&out, iLastRowid, i2.iRowid); - fts5BufferSafeAppendBlob(&out, i2.aPoslist, i2.aEof - i2.aPoslist); - } - assert_nc( out.n<=(p1->n+p2->n+9) ); - - fts5BufferFree(p1); - fts5BufferFree(&tmp); - memset(&out.p[out.n], 0, FTS5_DATA_ZERO_PADDING); - *p1 = out; + /* Initialize a doclist-iterator for each input buffer. Arrange them in + ** a linked-list starting at pHead in ascending order of rowid. Avoid + ** linking any iterators already at EOF into the linked list at all. */ + assert( nBuf+1<=sizeof(aMerger)/sizeof(aMerger[0]) ); + memset(aMerger, 0, sizeof(PrefixMerger)*(nBuf+1)); + pHead = &aMerger[nBuf]; + fts5DoclistIterInit(p1, &pHead->iter); + for(i=0; i<nBuf; i++){ + fts5DoclistIterInit(&aBuf[i], &aMerger[i].iter); + fts5PrefixMergerInsertByRowid(&pHead, &aMerger[i]); + nOut += aBuf[i].n; } + if( nOut==0 ) return; + nOut += p1->n + 9 + 10*nBuf; + + /* The maximum size of the output is equal to the sum of the + ** input sizes + 1 varint (9 bytes). The extra varint is because if the + ** first rowid in one input is a large negative number, and the first in + ** the other a non-negative number, the delta for the non-negative + ** number will be larger on disk than the literal integer value + ** was. 
+ ** + ** Or, if the input position-lists are corrupt, then the output might + ** include up to (nBuf+1) extra 10-byte positions created by interpreting -1 + ** (the value PoslistNext64() uses for EOF) as a position and appending + ** it to the output. This can happen at most once for each input + ** position-list, hence (nBuf+1) 10 byte paddings. */ + if( sqlite3Fts5BufferSize(&p->rc, &out, nOut) ) return; + + while( pHead ){ + fts5MergeAppendDocid(&out, iLastRowid, pHead->iter.iRowid); + + if( pHead->pNext && iLastRowid==pHead->pNext->iter.iRowid ){ + /* Merge data from two or more poslists */ + i64 iPrev = 0; + int nTmp = FTS5_DATA_ZERO_PADDING; + int nMerge = 0; + PrefixMerger *pSave = pHead; + PrefixMerger *pThis = 0; + int nTail = 0; + + pHead = 0; + while( pSave && pSave->iter.iRowid==iLastRowid ){ + PrefixMerger *pNext = pSave->pNext; + pSave->iOff = 0; + pSave->iPos = 0; + pSave->aPos = &pSave->iter.aPoslist[pSave->iter.nSize]; + fts5PrefixMergerNextPosition(pSave); + nTmp += pSave->iter.nPoslist + 10; + nMerge++; + fts5PrefixMergerInsertByPosition(&pHead, pSave); + pSave = pNext; + } + + if( pHead==0 || pHead->pNext==0 ){ + p->rc = FTS5_CORRUPT; + break; + } + + /* See the earlier comment in this function for an explanation of why + ** corrupt input position lists might cause the output to consume + ** at most nMerge*10 bytes of unexpected space. */ + if( sqlite3Fts5BufferSize(&p->rc, &tmp, nTmp+nMerge*10) ){ + break; + } + fts5BufferZero(&tmp); + + pThis = pHead; + pHead = pThis->pNext; + sqlite3Fts5PoslistSafeAppend(&tmp, &iPrev, pThis->iPos); + fts5PrefixMergerNextPosition(pThis); + fts5PrefixMergerInsertByPosition(&pHead, pThis); + + while( pHead->pNext ){ + pThis = pHead; + if( pThis->iPos!=iPrev ){ + sqlite3Fts5PoslistSafeAppend(&tmp, &iPrev, pThis->iPos); + } + fts5PrefixMergerNextPosition(pThis); + pHead = pThis->pNext; + fts5PrefixMergerInsertByPosition(&pHead, pThis); + } + + if( pHead->iPos!=iPrev ){ + sqlite3Fts5PoslistSafeAppend(&tmp, &iPrev, pHead->iPos); + } + nTail = pHead->iter.nPoslist - pHead->iOff; + + /* WRITEPOSLISTSIZE */ + assert( tmp.n+nTail<=nTmp ); + if( tmp.n+nTail>nTmp-FTS5_DATA_ZERO_PADDING ){ + if( p->rc==SQLITE_OK ) p->rc = FTS5_CORRUPT; + break; + } + fts5BufferSafeAppendVarint(&out, (tmp.n+nTail) * 2); + fts5BufferSafeAppendBlob(&out, tmp.p, tmp.n); + if( nTail>0 ){ + fts5BufferSafeAppendBlob(&out, &pHead->aPos[pHead->iOff], nTail); + } + + pHead = pSave; + for(i=0; i<nBuf+1; i++){ + PrefixMerger *pX = &aMerger[i]; + if( pX->iter.aPoslist && pX->iter.iRowid==iLastRowid ){ + fts5DoclistIterNext(&pX->iter); + fts5PrefixMergerInsertByRowid(&pHead, pX); + } + } + + }else{ + /* Copy poslist from pHead to output */ + PrefixMerger *pThis = pHead; + Fts5DoclistIter *pI = &pThis->iter; + fts5BufferSafeAppendBlob(&out, pI->aPoslist, pI->nPoslist+pI->nSize); + fts5DoclistIterNext(pI); + pHead = pThis->pNext; + fts5PrefixMergerInsertByRowid(&pHead, pThis); + } + } + + fts5BufferFree(p1); + fts5BufferFree(&tmp); + memset(&out.p[out.n], 0, FTS5_DATA_ZERO_PADDING); + *p1 = out; } static void fts5SetupPrefixIter( Fts5Index *p, /* Index to read from */ int bDesc, /* True for "ORDER BY rowid DESC" */ - const u8 *pToken, /* Buffer containing prefix to match */ + int iIdx, /* Index to scan for data */ + u8 *pToken, /* Buffer containing prefix to match */ int nToken, /* Size of buffer pToken in bytes */ Fts5Colset *pColset, /* Restrict matches to these columns */ Fts5Iter **ppIter /* OUT: New iterator */ ){ Fts5Structure *pStruct; Fts5Buffer *aBuf; - const int nBuf = 
32; + int nBuf = 32; + int nMerge = 1; - void (*xMerge)(Fts5Index*, Fts5Buffer*, Fts5Buffer*); + void (*xMerge)(Fts5Index*, Fts5Buffer*, int, Fts5Buffer*); void (*xAppend)(Fts5Index*, i64, Fts5Iter*, Fts5Buffer*); if( p->pConfig->eDetail==FTS5_DETAIL_NONE ){ xMerge = fts5MergeRowidLists; xAppend = fts5AppendRowid; }else{ + nMerge = FTS5_MERGE_NLIST-1; + nBuf = nMerge*8; /* Sufficient to merge (16^8)==(2^32) lists */ xMerge = fts5MergePrefixLists; xAppend = fts5AppendPoslist; } @@ -222518,6 +224954,27 @@ static void fts5SetupPrefixIter( int bNewTerm = 1; memset(&doclist, 0, sizeof(doclist)); + if( iIdx!=0 ){ + int dummy = 0; + const int f2 = FTS5INDEX_QUERY_SKIPEMPTY|FTS5INDEX_QUERY_NOOUTPUT; + pToken[0] = FTS5_MAIN_PREFIX; + fts5MultiIterNew(p, pStruct, f2, pColset, pToken, nToken, -1, 0, &p1); + fts5IterSetOutputCb(&p->rc, p1); + for(; + fts5MultiIterEof(p, p1)==0; + fts5MultiIterNext2(p, p1, &dummy) + ){ + Fts5SegIter *pSeg = &p1->aSeg[ p1->aFirst[1].iFirst ]; + p1->xSetOutputs(p1, pSeg); + if( p1->base.nData ){ + xAppend(p, p1->base.iRowid-iLastRowid, p1, &doclist); + iLastRowid = p1->base.iRowid; + } + } + fts5MultiIterFree(p1); + } + + pToken[0] = FTS5_MAIN_PREFIX + iIdx; fts5MultiIterNew(p, pStruct, flags, pColset, pToken, nToken, -1, 0, &p1); fts5IterSetOutputCb(&p->rc, p1); for( /* no-op */ ; @@ -222538,13 +224995,21 @@ static void fts5SetupPrefixIter( if( p1->base.iRowid<=iLastRowid && doclist.n>0 ){ for(i=0; p->rc==SQLITE_OK && doclist.n; i++){ - assert( i<nBuf ); - if( aBuf[i].n==0 ){ - fts5BufferSwap(&doclist, &aBuf[i]); - fts5BufferZero(&doclist); - }else{ - xMerge(p, &doclist, &aBuf[i]); - fts5BufferZero(&aBuf[i]); + int i1 = i*nMerge; + int iStore; + assert( i1+nMerge<=nBuf ); + for(iStore=i1; iStore<i1+nMerge; iStore++){ + if( aBuf[iStore].n==0 ){ + fts5BufferSwap(&doclist, &aBuf[iStore]); + fts5BufferZero(&doclist); + break; + } + } + if( iStore==i1+nMerge ){ + xMerge(p, &doclist, nMerge, &aBuf[i1]); + for(iStore=i1; iStore<i1+nMerge; iStore++){ + fts5BufferZero(&aBuf[iStore]); + } } } iLastRowid = 0; @@ -222554,11 +225019,15 @@ static void fts5SetupPrefixIter( iLastRowid = p1->base.iRowid; } - for(i=0; i<nBuf; i++){ + assert( (nBuf%nMerge)==0 ); + for(i=0; i<nBuf; i+=nMerge){ + int iFree; if( p->rc==SQLITE_OK ){ - xMerge(p, &doclist, &aBuf[i]); + xMerge(p, &doclist, nMerge, &aBuf[i]); + } + for(iFree=i; iFree<i+nMerge; iFree++){ + fts5BufferFree(&aBuf[iFree]); } - fts5BufferFree(&aBuf[i]); } fts5MultiIterFree(p1); @@ -222813,6 +225282,7 @@ static int sqlite3Fts5IndexQuery( if( sqlite3Fts5BufferSize(&p->rc, &buf, nToken+1)==0 ){ int iIdx = 0; /* Index to search */ + int iPrefixIdx = 0; /* +1 prefix index */ if( nToken ) memcpy(&buf.p[1], pToken, nToken); /* Figure out which index to search and set iIdx accordingly. 
If this @@ -222834,7 +225304,9 @@ static int sqlite3Fts5IndexQuery( if( flags & FTS5INDEX_QUERY_PREFIX ){ int nChar = fts5IndexCharlen(pToken, nToken); for(iIdx=1; iIdx<=pConfig->nPrefix; iIdx++){ - if( pConfig->aPrefix[iIdx-1]==nChar ) break; + int nIdxChar = pConfig->aPrefix[iIdx-1]; + if( nIdxChar==nChar ) break; + if( nIdxChar==nChar+1 ) iPrefixIdx = iIdx; } } @@ -222851,8 +225323,7 @@ static int sqlite3Fts5IndexQuery( }else{ /* Scan multiple terms in the main index */ int bDesc = (flags & FTS5INDEX_QUERY_DESC)!=0; - buf.p[0] = FTS5_MAIN_PREFIX; - fts5SetupPrefixIter(p, bDesc, buf.p, nToken+1, pColset, &pRet); + fts5SetupPrefixIter(p, bDesc, iPrefixIdx, buf.p, nToken+1, pColset,&pRet); assert( p->rc!=SQLITE_OK || pRet->pColset==0 ); fts5IterSetOutputCb(&p->rc, pRet); if( p->rc==SQLITE_OK ){ @@ -222925,8 +225396,9 @@ static int sqlite3Fts5IterNextFrom(Fts5IndexIter *pIndexIter, i64 iMatch){ static const char *sqlite3Fts5IterTerm(Fts5IndexIter *pIndexIter, int *pn){ int n; const char *z = (const char*)fts5MultiIterTerm((Fts5Iter*)pIndexIter, &n); + assert_nc( z || n<=1 ); *pn = n-1; - return &z[1]; + return (z ? &z[1] : 0); } /* @@ -226212,7 +228684,8 @@ static int fts5ApiPhraseFirst( int n; int rc = fts5CsrPoslist(pCsr, iPhrase, &pIter->a, &n); if( rc==SQLITE_OK ){ - pIter->b = &pIter->a[n]; + assert( pIter->a || n==0 ); + pIter->b = (pIter->a ? &pIter->a[n] : 0); *piCol = 0; *piOff = 0; fts5ApiPhraseNext(pCtx, pIter, piCol, piOff); @@ -226271,7 +228744,8 @@ static int fts5ApiPhraseFirstColumn( rc = sqlite3Fts5ExprPhraseCollist(pCsr->pExpr, iPhrase, &pIter->a, &n); } if( rc==SQLITE_OK ){ - pIter->b = &pIter->a[n]; + assert( pIter->a || n==0 ); + pIter->b = (pIter->a ? &pIter->a[n] : 0); *piCol = 0; fts5ApiPhraseNextColumn(pCtx, pIter, piCol); } @@ -226279,7 +228753,8 @@ static int fts5ApiPhraseFirstColumn( int n; rc = fts5CsrPoslist(pCsr, iPhrase, &pIter->a, &n); if( rc==SQLITE_OK ){ - pIter->b = &pIter->a[n]; + assert( pIter->a || n==0 ); + pIter->b = (pIter->a ? 
&pIter->a[n] : 0); if( n<=0 ){ *piCol = -1; }else if( pIter->a[0]==0x01 ){ @@ -226757,7 +229232,7 @@ static int sqlite3Fts5GetTokenizer( *pzErr = sqlite3_mprintf("no such tokenizer: %s", azArg[0]); }else{ rc = pMod->x.xCreate( - pMod->pUserData, &azArg[1], (nArg?nArg-1:0), &pConfig->pTok + pMod->pUserData, (azArg?&azArg[1]:0), (nArg?nArg-1:0), &pConfig->pTok ); pConfig->pTokApi = &pMod->x; if( rc!=SQLITE_OK ){ @@ -226820,7 +229295,7 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2020-12-01 16:14:00 a26b6597e3ae272231b96f9982c3bcc17ddec2f2b6eb4df06a224b91089fed5b", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2021-04-02 15:20:15 5d4c65779dab868b285519b19e4cf9d451d50c6048f06f653aa701ec212df45e", -1, SQLITE_TRANSIENT); } /* @@ -231746,9 +234221,9 @@ SQLITE_API int sqlite3_stmt_init( #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_STMTVTAB) */ /************** End of stmt.c ************************************************/ -#if __LINE__!=231748 +#if __LINE__!=234223 #undef SQLITE_SOURCE_ID -#define SQLITE_SOURCE_ID "2020-12-01 16:14:00 a26b6597e3ae272231b96f9982c3bcc17ddec2f2b6eb4df06a224b91089falt2" +#define SQLITE_SOURCE_ID "2021-04-02 15:20:15 5d4c65779dab868b285519b19e4cf9d451d50c6048f06f653aa701ec212dalt2" #endif /* Return the source-id for this library */ SQLITE_API const char *sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; } diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h index cc3b023bf..f1c44f1d7 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h @@ -124,9 +124,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.34.0" -#define SQLITE_VERSION_NUMBER 3034000 -#define SQLITE_SOURCE_ID "2020-12-01 16:14:00 a26b6597e3ae272231b96f9982c3bcc17ddec2f2b6eb4df06a224b91089fed5b" +#define SQLITE_VERSION "3.35.4" +#define SQLITE_VERSION_NUMBER 3035004 +#define SQLITE_SOURCE_ID "2021-04-02 15:20:15 5d4c65779dab868b285519b19e4cf9d451d50c6048f06f653aa701ec212df45e" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -2116,7 +2116,13 @@ struct sqlite3_mem_methods { ** The second parameter is a pointer to an integer into which ** is written 0 or 1 to indicate whether triggers are disabled or enabled ** following this call. The second parameter may be a NULL pointer, in -** which case the trigger setting is not reported back. </dd> +** which case the trigger setting is not reported back. +** +** <p>Originally this option disabled all triggers. ^(However, since +** SQLite version 3.35.0, TEMP triggers are still allowed even if +** this option is off. So, in other words, this option now only disables +** triggers in the main database schema or in the schemas of ATTACH-ed +** databases.)^ </dd> ** ** [[SQLITE_DBCONFIG_ENABLE_VIEW]] ** <dt>SQLITE_DBCONFIG_ENABLE_VIEW</dt> @@ -2127,7 +2133,13 @@ struct sqlite3_mem_methods { ** The second parameter is a pointer to an integer into which ** is written 0 or 1 to indicate whether views are disabled or enabled ** following this call. The second parameter may be a NULL pointer, in -** which case the view setting is not reported back. </dd> +** which case the view setting is not reported back. +** +** <p>Originally this option disabled all views. 
^(However, since +** SQLite version 3.35.0, TEMP views are still allowed even if +** this option is off. So, in other words, this option now only disables +** views in the main database schema or in the schemas of ATTACH-ed +** databases.)^ </dd> ** ** [[SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER]] ** <dt>SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER</dt> @@ -3500,6 +3512,7 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); ** that uses dot-files in place of posix advisory locking. ** <tr><td> file:data.db?mode=readonly <td> ** An error. "readonly" is not a valid option for the "mode" parameter. +** Use "ro" instead: "file:data.db?mode=ro". ** </table> ** ** ^URI hexadecimal escape sequences (%HH) are supported within the path and @@ -3698,7 +3711,7 @@ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*); ** If the Y parameter to sqlite3_free_filename(Y) is anything other ** than a NULL pointer or a pointer previously acquired from ** sqlite3_create_filename(), then bad things such as heap -** corruption or segfaults may occur. The value Y should be +** corruption or segfaults may occur. The value Y should not be ** used again after sqlite3_free_filename(Y) has been called. This means ** that if the [sqlite3_vfs.xOpen()] method of a VFS has been called using Y, ** then the corresponding [sqlite3_module.xClose() method should also be @@ -7766,7 +7779,8 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_PRNG_SEED 28 #define SQLITE_TESTCTRL_EXTRA_SCHEMA_CHECKS 29 #define SQLITE_TESTCTRL_SEEK_COUNT 30 -#define SQLITE_TESTCTRL_LAST 30 /* Largest TESTCTRL */ +#define SQLITE_TESTCTRL_TRACEFLAGS 31 +#define SQLITE_TESTCTRL_LAST 31 /* Largest TESTCTRL */ /* ** CAPI3REF: SQL Keyword Checking @@ -10439,6 +10453,14 @@ SQLITE_API int sqlite3session_patchset( */ SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession); +/* +** CAPI3REF: Query for the amount of heap memory used by a session object. +** +** This API returns the total amount of heap memory in bytes currently +** used by the session object passed as the only argument. +*/ +SQLITE_API sqlite3_int64 sqlite3session_memory_used(sqlite3_session *pSession); + /* ** CAPI3REF: Create An Iterator To Traverse A Changeset ** CONSTRUCTOR: sqlite3_changeset_iter @@ -10541,18 +10563,23 @@ SQLITE_API int sqlite3changeset_next(sqlite3_changeset_iter *pIter); ** call to [sqlite3changeset_next()] must have returned [SQLITE_ROW]. If this ** is not the case, this function returns [SQLITE_MISUSE]. ** -** If argument pzTab is not NULL, then *pzTab is set to point to a -** nul-terminated utf-8 encoded string containing the name of the table -** affected by the current change. The buffer remains valid until either -** sqlite3changeset_next() is called on the iterator or until the -** conflict-handler function returns. If pnCol is not NULL, then *pnCol is -** set to the number of columns in the table affected by the change. If -** pbIndirect is not NULL, then *pbIndirect is set to true (1) if the change +** Arguments pOp, pnCol and pzTab may not be NULL. Upon return, three +** outputs are set through these pointers: +** +** *pOp is set to one of [SQLITE_INSERT], [SQLITE_DELETE] or [SQLITE_UPDATE], +** depending on the type of change that the iterator currently points to; +** +** *pnCol is set to the number of columns in the table affected by the change; and +** +** *pzTab is set to point to a nul-terminated utf-8 encoded string containing +** the name of the table affected by the current change. 
The buffer remains +** valid until either sqlite3changeset_next() is called on the iterator +** or until the conflict-handler function returns. +** +** If pbIndirect is not NULL, then *pbIndirect is set to true (1) if the change ** is an indirect change, or false (0) otherwise. See the documentation for ** [sqlite3session_indirect()] for a description of direct and indirect -** changes. Finally, if pOp is not NULL, then *pOp is set to one of -** [SQLITE_INSERT], [SQLITE_DELETE] or [SQLITE_UPDATE], depending on the -** type of change that the iterator currently points to. +** changes. ** ** If no error occurs, SQLITE_OK is returned. If an error does occur, an ** SQLite error code is returned. The values of the output variables may not diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3.go b/vendor/github.com/mattn/go-sqlite3/sqlite3.go index d1ff40637..5ac957092 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3.go @@ -1676,7 +1676,7 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) { // // Because default is NORMAL this statement is always executed if err := exec(fmt.Sprintf("PRAGMA synchronous = %s;", synchronousMode)); err != nil { - C.sqlite3_close_v2(db) + conn.Close() return nil, err } @@ -2007,6 +2007,13 @@ func (s *SQLiteStmt) execSync(args []namedValue) (driver.Result, error) { return &SQLiteResult{id: int64(rowid), changes: int64(changes)}, nil } +// Readonly reports if this statement is considered readonly by SQLite. +// +// See: https://sqlite.org/c3ref/stmt_readonly.html +func (s *SQLiteStmt) Readonly() bool { + return C.sqlite3_stmt_readonly(s.s) == 1 +} + // Close the rows. func (rc *SQLiteRows) Close() error { rc.s.mu.Lock() diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_column_metadata.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_column_metadata.go new file mode 100644 index 000000000..c67fa82b1 --- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_column_metadata.go @@ -0,0 +1,21 @@ +// +build sqlite_column_metadata + +package sqlite3 + +/* +#ifndef USE_LIBSQLITE3 +#cgo CFLAGS: -DSQLITE_ENABLE_COLUMN_METADATA +#include <sqlite3-binding.h> +#else +#include <sqlite3.h> +#endif +*/ +import "C" + +// ColumnTableName returns the table that is the origin of a particular result +// column in a SELECT statement. +// +// See https://www.sqlite.org/c3ref/column_database_name.html +func (s *SQLiteStmt) ColumnTableName(n int) string { + return C.GoString(C.sqlite3_column_table_name(s.s, C.int(n))) +} diff --git a/vendor/github.com/vektah/dataloaden/.gitignore b/vendor/github.com/vektah/dataloaden/.gitignore new file mode 100644 index 000000000..40b341e35 --- /dev/null +++ b/vendor/github.com/vektah/dataloaden/.gitignore @@ -0,0 +1,2 @@ +/vendor +/.idea \ No newline at end of file diff --git a/vendor/github.com/vektah/dataloaden/README.md b/vendor/github.com/vektah/dataloaden/README.md new file mode 100644 index 000000000..f5bcb917e --- /dev/null +++ b/vendor/github.com/vektah/dataloaden/README.md @@ -0,0 +1,97 @@ +### The DATALOADer gENerator [![CircleCI](https://circleci.com/gh/Vektah/dataloaden.svg?style=svg)](https://circleci.com/gh/vektah/dataloaden) [![Go Report Card](https://goreportcard.com/badge/github.com/vektah/dataloaden)](https://goreportcard.com/report/github.com/vektah/dataloaden) [![codecov](https://codecov.io/gh/vektah/dataloaden/branch/master/graph/badge.svg)](https://codecov.io/gh/vektah/dataloaden) + +Requires golang 1.11+ for modules support. 
+ +This is a tool for generating type safe data loaders for go, inspired by https://github.com/facebook/dataloader. + +The intended use is in graphql servers, to reduce the number of queries being sent to the database. These dataloader +objects should be request scoped and short lived. They should be cheap to create in every request even if they dont +get used. + +#### Getting started + +From inside the package you want to have the dataloader in: +```bash +go run github.com/vektah/dataloaden UserLoader string *github.com/dataloaden/example.User +``` + +This will generate a dataloader called `UserLoader` that looks up `*github.com/dataloaden/example.User`'s objects +based on a `string` key. + +In another file in the same package, create the constructor method: +```go +func NewUserLoader() *UserLoader { + return &UserLoader{ + wait: 2 * time.Millisecond, + maxBatch: 100, + fetch: func(keys []string) ([]*User, []error) { + users := make([]*User, len(keys)) + errors := make([]error, len(keys)) + + for i, key := range keys { + users[i] = &User{ID: key, Name: "user " + key} + } + return users, errors + }, + } +} +``` + +Then wherever you want to call the dataloader +```go +loader := NewUserLoader() + +user, err := loader.Load("123") +``` + +This method will block for a short amount of time, waiting for any other similar requests to come in, call your fetch +function once. It also caches values and wont request duplicates in a batch. + +#### Returning Slices + +You may want to generate a dataloader that returns slices instead of single values. Both key and value types can be a +simple go type expression: + +```bash +go run github.com/vektah/dataloaden UserSliceLoader string []*github.com/dataloaden/example.User +``` + +Now each key is expected to return a slice of values and the `fetch` function has the return type `[][]*User`. + +#### Using with go modules + +Create a tools.go that looks like this: +```go +// +build tools + +package main + +import _ "github.com/vektah/dataloaden" +``` + +This will allow go modules to see the dependency. + +You can invoke it from anywhere within your module now using `go run github.com/vektah/dataloaden` and +always get the pinned version. + +#### Wait, how do I use context with this? + +I don't think context makes sense to be passed through a data loader. Consider a few scenarios: +1. a dataloader shared between requests: request A and B both get batched together, which context should be passed to the DB? context.Background is probably more suitable. +2. a dataloader per request for graphql: two different nodes in the graph get batched together, they have different context for tracing purposes, which should be passed to the db? neither, you should just use the root request context. + + +So be explicit about your context: +```go +func NewLoader(ctx context.Context) *UserLoader { + return &UserLoader{ + wait: 2 * time.Millisecond, + maxBatch: 100, + fetch: func(keys []string) ([]*User, []error) { + // you now have a ctx to work with + }, + } +} +``` + +If you feel like I'm wrong please raise an issue. 
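Tying the README's last two points together — request-scoped loaders and explicit context — a common pattern is to build the loader in HTTP middleware and carry it in the request context. The sketch below is illustrative only, not part of dataloaden or of this change: it assumes it lives in the same package as the generated `UserLoader` and the `User` type from the examples above, and the `ctxKey`/`For` helpers and `fetchUsers` stub are hypothetical names.

```go
package example

import (
	"context"
	"net/http"
	"time"
)

// ctxKey is an unexported type so this package's context key cannot
// collide with keys defined elsewhere.
type ctxKey struct{}

// Middleware creates a fresh, request-scoped UserLoader and stores it in
// the request context. Per the README, loaders are cheap to create, so
// doing this on every request is fine even if the loader is never used.
func Middleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		loader := &UserLoader{
			wait:     2 * time.Millisecond,
			maxBatch: 100,
			fetch: func(keys []string) ([]*User, []error) {
				// Batched keys from different resolvers share the root
				// request context, as argued above.
				return fetchUsers(r.Context(), keys)
			},
		}
		ctx := context.WithValue(r.Context(), ctxKey{}, loader)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}

// For retrieves the request-scoped loader; resolvers call this instead of
// constructing their own.
func For(ctx context.Context) *UserLoader {
	return ctx.Value(ctxKey{}).(*UserLoader)
}

// fetchUsers is a stand-in for a real batch query (e.g. SELECT ... WHERE
// id IN (...)); it mirrors the in-memory fetch from the examples above.
func fetchUsers(ctx context.Context, keys []string) ([]*User, []error) {
	_ = ctx // a real implementation would pass ctx to the database
	users := make([]*User, len(keys))
	for i, key := range keys {
		users[i] = &User{ID: key, Name: "user " + key}
	}
	return users, make([]error, len(keys))
}
```

With this in place, any resolver handling the request can call `For(ctx).Load(id)` and all lookups within the batching window collapse into a single `fetch` call.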
diff --git a/vendor/github.com/vektah/dataloaden/appveyor.yml b/vendor/github.com/vektah/dataloaden/appveyor.yml new file mode 100644 index 000000000..ee3ff895d --- /dev/null +++ b/vendor/github.com/vektah/dataloaden/appveyor.yml @@ -0,0 +1,32 @@ +version: "{build}" + +# Source Config + +skip_branch_with_pr: true +clone_folder: c:\projects\dataloaden + +# Build host + +environment: + GOPATH: c:\gopath + GOVERSION: 1.11.5 + PATH: '%PATH%;c:\gopath\bin' + +init: + - git config --global core.autocrlf input + +# Build + +install: + # Install the specific Go version. + - rmdir c:\go /s /q + - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi + - msiexec /i go%GOVERSION%.windows-amd64.msi /q + - go version + +build: false +deploy: false + +test_script: + - go generate ./... + - go test -parallel 8 ./... diff --git a/vendor/github.com/vektah/dataloaden/dataloaden.go b/vendor/github.com/vektah/dataloaden/dataloaden.go new file mode 100644 index 000000000..3419286d5 --- /dev/null +++ b/vendor/github.com/vektah/dataloaden/dataloaden.go @@ -0,0 +1,28 @@ +package main + +import ( + "fmt" + "os" + + "github.com/vektah/dataloaden/pkg/generator" +) + +func main() { + if len(os.Args) != 4 { + fmt.Println("usage: name keyType valueType") + fmt.Println(" example:") + fmt.Println(" dataloaden 'UserLoader int []*github.com/my/package.User'") + os.Exit(1) + } + + wd, err := os.Getwd() + if err != nil { + fmt.Fprintln(os.Stderr, err.Error()) + os.Exit(2) + } + + if err := generator.Generate(os.Args[1], os.Args[2], os.Args[3], wd); err != nil { + fmt.Fprintln(os.Stderr, err.Error()) + os.Exit(2) + } +} diff --git a/vendor/github.com/vektah/dataloaden/licence.md b/vendor/github.com/vektah/dataloaden/licence.md new file mode 100644 index 000000000..e47379a15 --- /dev/null +++ b/vendor/github.com/vektah/dataloaden/licence.md @@ -0,0 +1,7 @@ +Copyright (c) 2017 Adam Scarr + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/vektah/dataloaden/pkg/generator/generator.go b/vendor/github.com/vektah/dataloaden/pkg/generator/generator.go new file mode 100644 index 000000000..ff618e7b3 --- /dev/null +++ b/vendor/github.com/vektah/dataloaden/pkg/generator/generator.go @@ -0,0 +1,163 @@ +package generator + +import ( + "bytes" + "fmt" + "io/ioutil" + "path/filepath" + "regexp" + "strings" + "unicode" + + "github.com/pkg/errors" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/imports" +) + +type templateData struct { + Package string + Name string + KeyType *goType + ValType *goType +} + +type goType struct { + Modifiers string + ImportPath string + ImportName string + Name string +} + +func (t *goType) String() string { + if t.ImportName != "" { + return t.Modifiers + t.ImportName + "." + t.Name + } + + return t.Modifiers + t.Name +} + +func (t *goType) IsPtr() bool { + return strings.HasPrefix(t.Modifiers, "*") +} + +func (t *goType) IsSlice() bool { + return strings.HasPrefix(t.Modifiers, "[]") +} + +var partsRe = regexp.MustCompile(`^([\[\]\*]*)(.*?)(\.\w*)?$`) + +func parseType(str string) (*goType, error) { + parts := partsRe.FindStringSubmatch(str) + if len(parts) != 4 { + return nil, fmt.Errorf("type must be in the form []*github.com/import/path.Name") + } + + t := &goType{ + Modifiers: parts[1], + ImportPath: parts[2], + Name: strings.TrimPrefix(parts[3], "."), + } + + if t.Name == "" { + t.Name = t.ImportPath + t.ImportPath = "" + } + + if t.ImportPath != "" { + p, err := packages.Load(&packages.Config{Mode: packages.NeedName}, t.ImportPath) + if err != nil { + return nil, err + } + if len(p) != 1 { + return nil, fmt.Errorf("not found") + } + + t.ImportName = p[0].Name + } + + return t, nil +} + +func Generate(name string, keyType string, valueType string, wd string) error { + data, err := getData(name, keyType, valueType, wd) + if err != nil { + return err + } + + filename := strings.ToLower(data.Name) + "_gen.go" + + if err := writeTemplate(filepath.Join(wd, filename), data); err != nil { + return err + } + + return nil +} + +func getData(name string, keyType string, valueType string, wd string) (templateData, error) { + var data templateData + + genPkg := getPackage(wd) + if genPkg == nil { + return templateData{}, fmt.Errorf("unable to find package info for " + wd) + } + + var err error + data.Name = name + data.Package = genPkg.Name + data.KeyType, err = parseType(keyType) + if err != nil { + return templateData{}, fmt.Errorf("key type: %s", err.Error()) + } + data.ValType, err = parseType(valueType) + if err != nil { + return templateData{}, fmt.Errorf("key type: %s", err.Error()) + } + + // if we are inside the same package as the type we don't need an import and can refer directly to the type + if genPkg.PkgPath == data.ValType.ImportPath { + data.ValType.ImportName = "" + data.ValType.ImportPath = "" + } + if genPkg.PkgPath == data.KeyType.ImportPath { + data.KeyType.ImportName = "" + data.KeyType.ImportPath = "" + } + + return data, nil +} + +func getPackage(dir string) *packages.Package { + p, _ := packages.Load(&packages.Config{ + Dir: dir, + }, ".") + + if len(p) != 1 { + return nil + } + + return p[0] +} + +func writeTemplate(filepath string, data templateData) error { + var buf bytes.Buffer + if err := tpl.Execute(&buf, data); err != nil { + return errors.Wrap(err, "generating code") + } + + src, err := imports.Process(filepath, buf.Bytes(), nil) + if err != nil { + return errors.Wrap(err, "unable to gofmt") + } + + if err := 
ioutil.WriteFile(filepath, src, 0644); err != nil { + return errors.Wrap(err, "writing output") + } + + return nil +} + +func lcFirst(s string) string { + r := []rune(s) + r[0] = unicode.ToLower(r[0]) + return string(r) +} diff --git a/vendor/github.com/vektah/dataloaden/pkg/generator/template.go b/vendor/github.com/vektah/dataloaden/pkg/generator/template.go new file mode 100644 index 000000000..48f5ba252 --- /dev/null +++ b/vendor/github.com/vektah/dataloaden/pkg/generator/template.go @@ -0,0 +1,245 @@ +package generator + +import "text/template" + +var tpl = template.Must(template.New("generated"). + Funcs(template.FuncMap{ + "lcFirst": lcFirst, + }). + Parse(` +// Code generated by github.com/vektah/dataloaden, DO NOT EDIT. + +package {{.Package}} + +import ( + "sync" + "time" + + {{if .KeyType.ImportPath}}"{{.KeyType.ImportPath}}"{{end}} + {{if .ValType.ImportPath}}"{{.ValType.ImportPath}}"{{end}} +) + +// {{.Name}}Config captures the config to create a new {{.Name}} +type {{.Name}}Config struct { + // Fetch is a method that provides the data for the loader + Fetch func(keys []{{.KeyType.String}}) ([]{{.ValType.String}}, []error) + + // Wait is how long wait before sending a batch + Wait time.Duration + + // MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit + MaxBatch int +} + +// New{{.Name}} creates a new {{.Name}} given a fetch, wait, and maxBatch +func New{{.Name}}(config {{.Name}}Config) *{{.Name}} { + return &{{.Name}}{ + fetch: config.Fetch, + wait: config.Wait, + maxBatch: config.MaxBatch, + } +} + +// {{.Name}} batches and caches requests +type {{.Name}} struct { + // this method provides the data for the loader + fetch func(keys []{{.KeyType.String}}) ([]{{.ValType.String}}, []error) + + // how long to done before sending a batch + wait time.Duration + + // this will limit the maximum number of keys to send in one batch, 0 = no limit + maxBatch int + + // INTERNAL + + // lazily created cache + cache map[{{.KeyType.String}}]{{.ValType.String}} + + // the current batch. keys will continue to be collected until timeout is hit, + // then everything will be sent to the fetch method and out to the listeners + batch *{{.Name|lcFirst}}Batch + + // mutex to prevent races + mu sync.Mutex +} + +type {{.Name|lcFirst}}Batch struct { + keys []{{.KeyType}} + data []{{.ValType.String}} + error []error + closing bool + done chan struct{} +} + +// Load a {{.ValType.Name}} by key, batching and caching will be applied automatically +func (l *{{.Name}}) Load(key {{.KeyType.String}}) ({{.ValType.String}}, error) { + return l.LoadThunk(key)() +} + +// LoadThunk returns a function that when called will block waiting for a {{.ValType.Name}}. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. 
+func (l *{{.Name}}) LoadThunk(key {{.KeyType.String}}) func() ({{.ValType.String}}, error) { + l.mu.Lock() + if it, ok := l.cache[key]; ok { + l.mu.Unlock() + return func() ({{.ValType.String}}, error) { + return it, nil + } + } + if l.batch == nil { + l.batch = &{{.Name|lcFirst}}Batch{done: make(chan struct{})} + } + batch := l.batch + pos := batch.keyIndex(l, key) + l.mu.Unlock() + + return func() ({{.ValType.String}}, error) { + <-batch.done + + var data {{.ValType.String}} + if pos < len(batch.data) { + data = batch.data[pos] + } + + var err error + // it's convenient to be able to return a single error for everything + if len(batch.error) == 1 { + err = batch.error[0] + } else if batch.error != nil { + err = batch.error[pos] + } + + if err == nil { + l.mu.Lock() + l.unsafeSet(key, data) + l.mu.Unlock() + } + + return data, err + } +} + +// LoadAll fetches many keys at once. It will be broken into appropriately sized +// sub-batches depending on how the loader is configured +func (l *{{.Name}}) LoadAll(keys []{{.KeyType}}) ([]{{.ValType.String}}, []error) { + results := make([]func() ({{.ValType.String}}, error), len(keys)) + + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + + {{.ValType.Name|lcFirst}}s := make([]{{.ValType.String}}, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + {{.ValType.Name|lcFirst}}s[i], errors[i] = thunk() + } + return {{.ValType.Name|lcFirst}}s, errors +} + +// LoadAllThunk returns a function that when called will block waiting for {{.ValType.Name}}s. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *{{.Name}}) LoadAllThunk(keys []{{.KeyType}}) (func() ([]{{.ValType.String}}, []error)) { + results := make([]func() ({{.ValType.String}}, error), len(keys)) + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + return func() ([]{{.ValType.String}}, []error) { + {{.ValType.Name|lcFirst}}s := make([]{{.ValType.String}}, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + {{.ValType.Name|lcFirst}}s[i], errors[i] = thunk() + } + return {{.ValType.Name|lcFirst}}s, errors + } +} + +// Prime the cache with the provided key and value. If the key already exists, no change is made +// and false is returned. +// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).) +func (l *{{.Name}}) Prime(key {{.KeyType}}, value {{.ValType.String}}) bool { + l.mu.Lock() + var found bool + if _, found = l.cache[key]; !found { + {{- if .ValType.IsPtr }} + // make a copy when writing to the cache, it's easy to pass a pointer in from a loop var + // and end up with the whole cache pointing to the same value. + cpy := *value + l.unsafeSet(key, &cpy) + {{- else if .ValType.IsSlice }} + // make a copy when writing to the cache, it's easy to pass a pointer in from a loop var + // and end up with the whole cache pointing to the same value.
+ cpy := make({{.ValType.String}}, len(value)) + copy(cpy, value) + l.unsafeSet(key, cpy) + {{- else }} + l.unsafeSet(key, value) + {{- end }} + } + l.mu.Unlock() + return !found +} + +// Clear the value at key from the cache, if it exists +func (l *{{.Name}}) Clear(key {{.KeyType}}) { + l.mu.Lock() + delete(l.cache, key) + l.mu.Unlock() +} + +func (l *{{.Name}}) unsafeSet(key {{.KeyType}}, value {{.ValType.String}}) { + if l.cache == nil { + l.cache = map[{{.KeyType}}]{{.ValType.String}}{} + } + l.cache[key] = value +} + +// keyIndex will return the location of the key in the batch; if it's not found +// it will add the key to the batch +func (b *{{.Name|lcFirst}}Batch) keyIndex(l *{{.Name}}, key {{.KeyType}}) int { + for i, existingKey := range b.keys { + if key == existingKey { + return i + } + } + + pos := len(b.keys) + b.keys = append(b.keys, key) + if pos == 0 { + go b.startTimer(l) + } + + if l.maxBatch != 0 && pos >= l.maxBatch-1 { + if !b.closing { + b.closing = true + l.batch = nil + go b.end(l) + } + } + + return pos +} + +func (b *{{.Name|lcFirst}}Batch) startTimer(l *{{.Name}}) { + time.Sleep(l.wait) + l.mu.Lock() + + // we must have hit a batch limit and are already finalizing this batch + if b.closing { + l.mu.Unlock() + return + } + + l.batch = nil + l.mu.Unlock() + + b.end(l) +} + +func (b *{{.Name|lcFirst}}Batch) end(l *{{.Name}}) { + b.data, b.error = l.fetch(b.keys) + close(b.done) +} +`)) diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go index 355b5a456..c26d1d29e 100644 --- a/vendor/golang.org/x/mod/module/module.go +++ b/vendor/golang.org/x/mod/module/module.go @@ -15,7 +15,7 @@ // but additional checking functions, most notably Check, verify that // a particular path, version pair is valid. // -// Escaped Paths +// # Escaped Paths // // Module paths appear as substrings of file system paths // (in the download cache) and of web server URLs in the proxy protocol. @@ -55,7 +55,7 @@ // Import paths have never allowed exclamation marks, so there is no // need to define how to escape a literal !. // -// Unicode Restrictions +// # Unicode Restrictions // // Today, paths are disallowed from using Unicode. // @@ -102,9 +102,9 @@ import ( "strings" "unicode" "unicode/utf8" + "errors" "golang.org/x/mod/semver" - errors "golang.org/x/xerrors" ) // A Version (for clients, a module.Version) is defined by a module path and version pair. diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/golang.org/x/net/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/golang.org/x/net/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/net/bpf/doc.go b/vendor/golang.org/x/net/bpf/doc.go index ae62feb53..04ec1c8ab 100644 --- a/vendor/golang.org/x/net/bpf/doc.go +++ b/vendor/golang.org/x/net/bpf/doc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file.
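Stepping back to the dataloaden addition as a whole: generator.go plus this template are what a code-generation step invokes for each loader. A minimal, hypothetical driver — the module path in the value type string is illustrative, and in practice the call sits behind a //go:generate directive rather than a standalone binary:

    package main

    import (
        "log"
        "os"

        "github.com/vektah/dataloaden/pkg/generator"
    )

    func main() {
        wd, err := os.Getwd()
        if err != nil {
            log.Fatal(err)
        }
        // Writes userloader_gen.go (strings.ToLower(name) + "_gen.go") into wd.
        // The value type uses the "[]*import/path.Name" form parseType expects.
        if err := generator.Generate("UserLoader", "int",
            "*example.com/project/models.User", wd); err != nil {
            log.Fatal(err)
        }
    }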
/* - Package bpf implements marshaling and unmarshaling of programs for the Berkeley Packet Filter virtual machine, and provides a Go implementation of the virtual machine. @@ -21,7 +20,7 @@ access to kernel functions, and while conditional branches are allowed, they can only jump forwards, to guarantee that there are no infinite loops. -The virtual machine +# The virtual machine The BPF VM is an accumulator machine. Its main register, called register A, is an implicit source and destination in all arithmetic @@ -50,7 +49,7 @@ to extensions, which are essentially calls to kernel utility functions. Currently, the only extensions supported by this package are the Linux packet filter extensions. -Examples +# Examples This packet filter selects all ARP packets. @@ -77,6 +76,5 @@ This packet filter captures a random 1% sample of traffic. // Ignore. bpf.RetConstant{Val: 0}, }) - */ package bpf // import "golang.org/x/net/bpf" diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go index 1ba43101f..f6877f98f 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (arm64 || amd64 || ppc64 || ppc64le || mips64 || mips64le || riscv64 || s390x) && linux -// +build arm64 amd64 ppc64 ppc64le mips64 mips64le riscv64 s390x +//go:build (arm64 || amd64 || loong64 || ppc64 || ppc64le || mips64 || mips64le || riscv64 || s390x) && linux +// +build arm64 amd64 loong64 ppc64 ppc64le mips64 mips64le riscv64 s390x // +build linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_unix.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_unix.go index aa1b06203..19d46789d 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_unix.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_unix.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
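Several of the following hunks make the same mechanical change: widening a build constraint to cover loong64 (and, for the cmsghdr helpers, zos). These files carry both constraint syntaxes because x/net still supported pre-Go-1.17 toolchains at this point; a compressed sketch of the convention, with the platform list shortened for illustration:

    //go:build (arm64 || amd64 || loong64) && linux
    // +build arm64 amd64 loong64
    // +build linux

    // The //go:build line is authoritative on Go 1.17+; older toolchains
    // read the // +build lines instead, OR-ing the space-separated terms
    // within a line and AND-ing the lines together, so both forms must
    // select exactly the same set of platforms.

    package socket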
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_zos_s390x.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_zos_s390x.go index 98be146bc..68dc8ad63 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_zos_s390x.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_zos_s390x.go @@ -4,22 +4,8 @@ package socket -import "syscall" - func (h *cmsghdr) set(l, lvl, typ int) { h.Len = int32(l) h.Level = int32(lvl) h.Type = int32(typ) } - -func controlHeaderLen() int { - return syscall.CmsgLen(0) -} - -func controlMessageLen(dataLen int) int { - return syscall.CmsgLen(dataLen) -} - -func controlMessageSpace(dataLen int) int { - return syscall.CmsgSpace(dataLen) -} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go index 3dc5def2b..2e94e96f8 100644 --- a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (arm64 || amd64 || ppc64 || ppc64le || mips64 || mips64le || riscv64 || s390x) && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || zos) -// +build arm64 amd64 ppc64 ppc64le mips64 mips64le riscv64 s390x +//go:build (arm64 || amd64 || loong64 || ppc64 || ppc64le || mips64 || mips64le || riscv64 || s390x) && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || zos) +// +build arm64 amd64 loong64 ppc64 ppc64le mips64 mips64le riscv64 s390x // +build aix darwin dragonfly freebsd linux netbsd openbsd zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go index 40ebedab3..0bfcf7afc 100644 --- a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go @@ -9,7 +9,9 @@ package socket import ( "net" + "os" "sync" + "syscall" ) type mmsghdrs []mmsghdr @@ -93,22 +95,86 @@ func (p *mmsghdrsPacker) pack(ms []Message, parseFn func([]byte, string) (net.Ad return hs } -var defaultMmsghdrsPool = mmsghdrsPool{ +// syscaller is a helper to invoke recvmmsg and sendmmsg via the RawConn.Read/Write interface. +// It is reusable, to amortize the overhead of allocating a closure for the function passed to +// RawConn.Read/Write. 
+type syscaller struct { + n int + operr error + hs mmsghdrs + flags int + + boundRecvmmsgF func(uintptr) bool + boundSendmmsgF func(uintptr) bool +} + +func (r *syscaller) init() { + r.boundRecvmmsgF = r.recvmmsgF + r.boundSendmmsgF = r.sendmmsgF +} + +func (r *syscaller) recvmmsg(c syscall.RawConn, hs mmsghdrs, flags int) (int, error) { + r.n = 0 + r.operr = nil + r.hs = hs + r.flags = flags + if err := c.Read(r.boundRecvmmsgF); err != nil { + return r.n, err + } + if r.operr != nil { + return r.n, os.NewSyscallError("recvmmsg", r.operr) + } + return r.n, nil +} + +func (r *syscaller) recvmmsgF(s uintptr) bool { + r.n, r.operr = recvmmsg(s, r.hs, r.flags) + return ioComplete(r.flags, r.operr) +} + +func (r *syscaller) sendmmsg(c syscall.RawConn, hs mmsghdrs, flags int) (int, error) { + r.n = 0 + r.operr = nil + r.hs = hs + r.flags = flags + if err := c.Write(r.boundSendmmsgF); err != nil { + return r.n, err + } + if r.operr != nil { + return r.n, os.NewSyscallError("sendmmsg", r.operr) + } + return r.n, nil +} + +func (r *syscaller) sendmmsgF(s uintptr) bool { + r.n, r.operr = sendmmsg(s, r.hs, r.flags) + return ioComplete(r.flags, r.operr) +} + +// mmsgTmps holds reusable temporary helpers for recvmmsg and sendmmsg. +type mmsgTmps struct { + packer mmsghdrsPacker + syscaller syscaller +} + +var defaultMmsgTmpsPool = mmsgTmpsPool{ p: sync.Pool{ New: func() interface{} { - return new(mmsghdrsPacker) + tmps := new(mmsgTmps) + tmps.syscaller.init() + return tmps }, }, } -type mmsghdrsPool struct { +type mmsgTmpsPool struct { p sync.Pool } -func (p *mmsghdrsPool) Get() *mmsghdrsPacker { - return p.p.Get().(*mmsghdrsPacker) +func (p *mmsgTmpsPool) Get() *mmsgTmps { + return p.p.Get().(*mmsgTmps) } -func (p *mmsghdrsPool) Put(packer *mmsghdrsPacker) { - p.p.Put(packer) +func (p *mmsgTmpsPool) Put(tmps *mmsgTmps) { + p.p.Put(tmps) } diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go index c9c592ddb..42411affa 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
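As the comment above explains, the refactor replaces a pool of bare packers with pooled bundles whose callback closures are bound once at construction, so the hot recvmmsg/sendmmsg path hands RawConn.Read/Write a long-lived func value instead of allocating a fresh closure per call. A reduced, self-contained sketch of that pattern, with illustrative names:

    package main

    import (
        "fmt"
        "sync"
    )

    // tmps plays the role of mmsgTmps: reusable scratch state plus a
    // method-value closure allocated once, in init, not on every call.
    type tmps struct {
        buf     []byte
        boundFn func(int) bool
    }

    func (t *tmps) init() { t.boundFn = t.step }

    // step mutates pooled state via its receiver, so the bound closure
    // never needs to capture per-call variables.
    func (t *tmps) step(n int) bool {
        t.buf = append(t.buf[:0], byte(n))
        return n > 0
    }

    var pool = sync.Pool{New: func() interface{} {
        t := new(tmps)
        t.init() // bind the closure exactly once per pooled object
        return t
    }}

    func main() {
        t := pool.Get().(*tmps)
        defer pool.Put(t)
        fmt.Println(t.boundFn(42), t.buf) // true [42]
    }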
-//go:build (arm64 || amd64 || ppc64 || ppc64le || mips64 || mips64le || riscv64 || s390x) && linux -// +build arm64 amd64 ppc64 ppc64le mips64 mips64le riscv64 s390x +//go:build (arm64 || amd64 || loong64 || ppc64 || ppc64le || mips64 || mips64le || riscv64 || s390x) && linux +// +build arm64 amd64 loong64 ppc64 ppc64le mips64 mips64le riscv64 s390x // +build linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go index 3fcb51b38..8f79b38f7 100644 --- a/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go +++ b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go @@ -9,32 +9,23 @@ package socket import ( "net" - "os" ) func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { for i := range ms { ms[i].raceWrite() } - packer := defaultMmsghdrsPool.Get() - defer defaultMmsghdrsPool.Put(packer) + tmps := defaultMmsgTmpsPool.Get() + defer defaultMmsgTmpsPool.Put(tmps) var parseFn func([]byte, string) (net.Addr, error) if c.network != "tcp" { parseFn = parseInetAddr } - hs := packer.pack(ms, parseFn, nil) - var operr error - var n int - fn := func(s uintptr) bool { - n, operr = recvmmsg(s, hs, flags) - return ioComplete(flags, operr) - } - if err := c.c.Read(fn); err != nil { + hs := tmps.packer.pack(ms, parseFn, nil) + n, err := tmps.syscaller.recvmmsg(c.c, hs, flags) + if err != nil { return n, err } - if operr != nil { - return n, os.NewSyscallError("recvmmsg", operr) - } if err := hs[:n].unpack(ms[:n], parseFn, c.network); err != nil { return n, err } @@ -45,25 +36,17 @@ func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { for i := range ms { ms[i].raceRead() } - packer := defaultMmsghdrsPool.Get() - defer defaultMmsghdrsPool.Put(packer) + tmps := defaultMmsgTmpsPool.Get() + defer defaultMmsgTmpsPool.Put(tmps) var marshalFn func(net.Addr, []byte) int if c.network != "tcp" { marshalFn = marshalInetAddr } - hs := packer.pack(ms, nil, marshalFn) - var operr error - var n int - fn := func(s uintptr) bool { - n, operr = sendmmsg(s, hs, flags) - return ioComplete(flags, operr) - } - if err := c.c.Write(fn); err != nil { + hs := tmps.packer.pack(ms, nil, marshalFn) + n, err := tmps.syscaller.sendmmsg(c.c, hs, flags) + if err != nil { return n, err } - if operr != nil { - return n, os.NewSyscallError("sendmmsg", operr) - } if err := hs[:n].unpack(ms[:n], nil, ""); err != nil { return n, err } diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_loong64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_loong64.go new file mode 100644 index 000000000..af964e617 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_loong64.go @@ -0,0 +1,13 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 +// +build loong64 + +package socket + +const ( + sysRECVMMSG = 0xf3 + sysSENDMMSG = 0x10d +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_loong64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_loong64.go new file mode 100644 index 000000000..6a94fec2c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_loong64.go @@ -0,0 +1,40 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +//go:build loong64 +// +build loong64 + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_0 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc.go index 59b71da57..4c19269be 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc.go @@ -4,32 +4,32 @@ package socket type iovec struct { - Base *byte - Len uint32 + Base *byte + Len uint32 } type msghdr struct { - Name *byte - Namelen uint32 - Iov *iovec - Iovlen uint32 - Control *byte - Controllen uint32 - Flags int32 + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 } type mmsghdr struct { - Hdr msghdr - Len uint32 + Hdr msghdr + Len uint32 } type cmsghdr struct { - Len uint32 - Level int32 - Type int32 + Len uint32 + Level int32 + Type int32 } const ( - sizeofIovec = 0x8 - sizeofMsghdr = 0x1c + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c ) diff --git a/vendor/golang.org/x/net/ipv4/doc.go b/vendor/golang.org/x/net/ipv4/doc.go index 245834979..6fbdc52b9 100644 --- a/vendor/golang.org/x/net/ipv4/doc.go +++ b/vendor/golang.org/x/net/ipv4/doc.go @@ -16,8 +16,7 @@ // 3376. // Source-specific multicast is defined in RFC 4607. // -// -// Unicasting +// # Unicasting // // The options for unicasting are available for net.TCPConn, // net.UDPConn and net.IPConn which are created as network connections @@ -51,8 +50,7 @@ // }(c) // } // -// -// Multicasting +// # Multicasting // // The options for multicasting are available for net.UDPConn and // net.IPConn which are created as network connections that use the @@ -141,8 +139,7 @@ // } // } // -// -// More multicasting +// # More multicasting // // An application that uses PacketConn or RawConn may join multiple // multicast groups. For example, a UDP listener with port 1024 might @@ -200,8 +197,7 @@ // // error handling // } // -// -// Source-specific multicasting +// # Source-specific multicasting // // An application that uses PacketConn or RawConn on IGMPv3 supported // platform is able to join source-specific multicast groups. diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_loong64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_loong64.go new file mode 100644 index 000000000..e15c22c74 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_loong64.go @@ -0,0 +1,77 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +//go:build loong64 +// +build loong64 + +package ipv4 + +const ( + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go index 200617ea8..e2fddd645 100644 --- a/vendor/golang.org/x/net/publicsuffix/list.go +++ b/vendor/golang.org/x/net/publicsuffix/list.go @@ -33,9 +33,10 @@ // the last two are not (but share the same eTLD+1: "google.com"). // // All of these domains have the same eTLD+1: -// - "www.books.amazon.co.uk" -// - "books.amazon.co.uk" -// - "amazon.co.uk" +// - "www.books.amazon.co.uk" +// - "books.amazon.co.uk" +// - "amazon.co.uk" +// // Specifically, the eTLD+1 is "amazon.co.uk", because the eTLD is "co.uk". // // There is no closed form algorithm to calculate the eTLD of a domain. diff --git a/vendor/golang.org/x/net/publicsuffix/table.go b/vendor/golang.org/x/net/publicsuffix/table.go index 51f40b8eb..a44423976 100644 --- a/vendor/golang.org/x/net/publicsuffix/table.go +++ b/vendor/golang.org/x/net/publicsuffix/table.go @@ -2,7 +2,7 @@ package publicsuffix -const version = "publicsuffix.org's public_suffix_list.dat, git revision 792f13d38c795cf910de96de4baac48f1fee3162 (2021-08-23T07:37:32Z)" +const version = "publicsuffix.org's public_suffix_list.dat, git revision 3c213aab32b3c014f171b1673d4ce9b5cd72bf1c (2021-11-26T23:05:53Z)" const ( nodesBitsChildren = 10 @@ -23,510 +23,510 @@ const ( ) // numTLD is the number of top level domains. -const numTLD = 1508 +const numTLD = 1504 // Text is the combined text of all labels. 
const text = "9guacuiababia-goracleaningroks-theatree164-balsfjordd-dnshome-we" + - "bserverdal-o-g-i-n4t3l3p0rtashkentatamotorsitestinggfarmerseinea" + - "stasiaetnaamesjevuemielnoboribetsucks3-ap-northeast-2ix4432-bals" + - "an-suedtirolkuszczytnoopencraftrainingdyniabruzzoologicalabamaga" + - "sakishimabaraogashimadachicagoboats3-ap-northeast-1kappchizip611" + - "6-b-datacentermezproxyzgorabogadobeaemcloud-fr1337birdartcenterp" + - "risecloudaccesscambridgeiseiroumuencheninohekinannestaditchyouri" + - "parachutingleezebirkenesoddtangenovaranzanquannefrankfurtcmember" + - "s3-website-ap-southeast-2birthplacebitbucketrzyninomiyakonojorpe" + - "landivtasvuodnakamagayahooguyurihonjournalistjohnirasakin-vpncas" + - "inord-odalombardynalias3-website-eu-west-1bjarkoyusuharabjerkrei" + - "mbamblebesbyenvironmentalconservationionjukudoyamaintenancebizen" + - "akanojohanamakinoharaustinnaumburgjerdrumcpeatonsbergjerstadotsu" + - "ruokakamigaharamswatch-and-clockerevistarnbergivestbytemark12bju" + - "gnieznord-frontierblackfridayusuisservehttpbin-butterbloombergba" + - "uernishiazaincheonishigovtcp4bloxcms3-website-sa-east-1bluebitem" + - "p-dnswedenishiharabmoattachments3-website-us-east-1bms3-website-" + - "us-west-1bmwedeployuufcfanishiizunazukindianapolis-a-bloggerbnrw" + - "egroweibolognagareyamakeuparaglidinglitcheltenham-radio-opensoci" + - "alomzaporizhzhegurindianmarketingliwicebomloabathsbcateringebuil" + - "dingloboavistanbulsan-sudtirolondonetskaratsuginamikatagamilanos" + - "hiroomglogoweirbondigitaloceanographicsxboxfordellogliastradingl" + - "oppenzaolbia-tempio-olbiatempioolbialystokkeliwebhostinglugsjcbn" + - "pparibashkiriabonnishikatakazakindielddanuorrindigenaklodzkodair" + - "abookinghostedpictetgoryuzawabookonlinewjerseyboomlajollamerican" + - "expressexyboschaefflerdalondrinamsskoganeindowapblogsiteleafamil" + - "ycompany-2bostik-serverrankoshigayachts3-website-us-west-2boston" + - "akijinsekikogentappsselfiparisor-fronishikatsuragit-reposts-and-" + - "telecommunicationsakyotanabellevuelosangelesjabbottjeldsundivtta" + - "svuotnakamurataiwanairforcechireadthedocscbgmbhartipschlesisches" + - "alangenishikawazukamisatohoboleslawieconomiastalowa-wolawabotani" + - "calgardeno-stagingminakamichiharabotanicgardenishimerabotanycath" + - "olicaxiaskimitsubatamibudejjuegoshikievennodesabaerobaticketsalo" + - "n-1bouncemerckmsdnipropetrovskjervoyageometre-experts-comptables" + - "altdalorenskogmodellingmxenishinomiyashironoddabounty-fullensake" + - "rrypropertiesaludiyboutiquebecommerce-shopitsitempurlotteboutire" + - "serve-onlinewmexicodyn-o-saurlandesalvadordalibabalena-devicesal" + - "zburgretakamatsukawabozen-sudtirolottokonamegatakatsukindustriab" + - "ozen-suedtirolouvrehabmerbplaceducatorprojectjmaxxxeroxfinitybra" + - "ndywinevalleybrasiliabresciabrindisibenikikugawashtenawdevcdnacc" + - "essobetsuitaijindustriesteamfamberkeleybristoloseyouriparliament" + - "jomelhusgardenishinoomotegohtawaramotoineppubtlsamegawabritishco" + - "lumbialowiezaganishinoshimatsunowruzhgorodeomniweatherchannelpus" + - "ercontentjxjampalacebroadcastlebtimnetzjavaldaostathelleluxembou" + - "rgrimstadrangedalublindesnesamnangerbroadwaybroke-itvedestrandra" + - "y-dnstracebrokerbronnoysundrayddns5ybrothermesaverdealerbrowsers" + - "afetymarketsampaleomurabrumunddalucaniabrunelastxn--0trq7p7nnish" + - "iokoppegardraydnsupdaterbrusselsamsclubartowellbeingzonebruxelle" + - "samsungripebryanskodjedugit-pagespeedmobilizeroticagliaricoharuh" + - 
"rbrynewportkmaxxn--11b4c3drivegarsheiheijinfinitinsureggioemilia" + - "romagnamsosnowiechernivtsiciliabuskerudrobaknoluoktachikawafflec" + - "elluciancrmadviseharag-cloud-charitychyattorneyagawakayamagazine" + - "kobayashikaoirmitakeharagusartsandnessjoenishitosashimizunaminam" + - "ibosognebuzentsujiiebuzzlgriwataraidrrbwesteuropenairbusantiques" + - "t-a-la-maisondre-landroidrudupontariobranconavstackareliancebzhi" + - "tomirbzzwestfalenishiwakintelligencecoloradoplateaudiopsysanokar" + - "umaifarsundyndns-homednsantabarbaracolumbusheycommunecommunity-p" + - "rochowicecomobaracomparemarkerryhotelsantacruzsantafedjeffersonc" + - "omsecaaskoyabearalvahkihokumakogenebakkeshibechambagriculturenne" + - "bugattiffanyaarborteaches-yogasawaracingrpartinternationalfirear" + - "msantamariakecondoshichinohealth-carereformemergencyahikobeardub" + - "aiduckdnsncfdyndns-ipartsantoandreamhostersanukintuitoyosatoyoka" + - "waconferenceconstructionconsuladoesntexisteingeekasaokamikitayam" + - "atsurinuyamashinatsukigatakasagotpantheonsiteconsultanthropology" + - "consultingruenoharacontactoyotapartycontagematsubaracontemporary" + - "arteducationalchikugodogadollsaobernardocontractorskenconventure" + - "shinodearthruherecifedexetercookingchannelsdvrdnsdojoburguidefen" + - "seljordyndns-mailcoolcooperativano-frankivskygearappasadenarashi" + - "nocopenhagencyclopedichofunatoriginstantcloudfrontdoorcoproducti" + - "onsaogoncarriercorporationcorsicahcesuoloansaotomeiwamashikokuch" + - "uocorvettenrissadonnagatorogersvp4cosenzakopanelblagrarchaeology" + - "eongbuk0cosidnsfor-better-thanawatchesapporocostumedicaltanisset" + - "taishinomakindlefrakkestadyndns-office-on-the-webercouchpotatofr" + - "iesardegnaroycoukashibatakasugainvestmentsardiniacouncilcouponsa" + - "rlcozoracqcxn--12cfi8ixb8lcranbrookuwanalyticsarpsborguitarsaruf" + - "utsunomiyawakasaikaitabashijonawatecrdyndns-picsasayamatta-varjj" + - "atoyotomiyazakinzais-a-candidatecreditcardyndns-remotewdyndns-se" + - "rverisigncreditunioncremonashgabadaddjaguarqhachinohedmarkashiha" + - "racrewhalingujoinvilleirvikashiwaracricketoyotsukaidocrimeast-ka" + - "zakhstanangercrotonecrownipassagensasebofagemologicallynxn--12co" + - "0c3b4evalled-aostamayufuettertdasnetzcrsaskatchewancruisesassari" + - "s-a-caterercuisinellancashirecipescaracalvinklein-berlindaskvoll" + - "pagesaudaculturalcentertainmentoyouracuneocupcakecuritibaghdadul" + - "toystre-slidrettozawacurvalledaostaobaomoriguchiharahkkeravjuedi" + - "schesapeakebayernuorochestercymruovatmallorcafederationcilla-spe" + - "ziacyonabarumemorialcyouthachiojiyaizuwakamatsubushikusakadogawa" + - "ferrarivneferrerotikagoshimalopolskanlandyndns-workshoparenakani" + - "ikawatanagurafetsundyndns1fgushikamifuranordreisa-hockeynutsirac" + - "usaintlouis-a-anarchistoireggio-emilia-romagnakatombetsumitakagi" + - "izefhvalerfidoomdnsiskinkyotobetsulikes-piedmonticellocalzonefie" + - "ldynnsauheradyndns-webhareidsbergentingulenfigueresinstagingwidd" + - "leitungsenfilateliafilegear-audnedalnfilegear-dealstahaugesunder" + - "seaportsinfolionetworkangerfilegear-gbizfilegear-iefilegear-jpmo" + - "rganfilegear-sg-1filminamiechizenfinalfinancefineartschulefinlan" + - "dynservebbsavannahgafinnoyfirebaseappatriafirenetrani-andria-bar" + - "letta-trani-andriafirenzefirestonefirewebhopocznordre-landynulme" + - "msettlersaveincloudyndns-wikirafirmdalegnicapebretonamicrolighti" + - "ngxn--1ctwolominamatargithubusercontentraniandriabarlettatranian" + - "driafishingokasellfylkesbiblackbaudcdn-edgestackhero-networkingg" + - 
"roupowiathletajimageandsoundandvision-riopretochigiessensiositel" + - "evision-webpaashorokanaiefitjarvodkagaminogatagajobojis-a-chefas" + - "tly-terrariumetacentrumeteorappassenger-associationfitnessettlem" + - "entranoyfjalerflekkefjordynv6flesbergflickragerokunohealthcareer" + - "schulserverflirfloginlinefloraflorencefloridatsunanjoetsuwanouch" + - "ikujogaszkolancasterfloripaderbornfloristanohatajimidsundynvpnpl" + - "us-4floromskogflowerschwarzgwangjuifminamifuranowtvallee-aostero" + - "yfltranslateflynnhosting-clusterfndyroyrvikingunmaniwakuratefnwk" + - "asukabedzin-addrammenuniversityfoodnetworkdalfor-ourfor-somedio-" + - "campidano-mediocampidanomediofor-theaterforexrothachirogatakamor" + - "iokakudamatsueforgotdnschweizforli-cesena-forlicesenaforlikescan" + - "dyn53forsalegoldpoint2thisamitsukeforsandasuoloftransportefortal" + - "fortextileikangerfortmissoulanciafortworthadanore-og-uvdalfosnes" + - "ciencecentersciencehistoryfotransurlfoxafozfranamizuhobby-sitefr" + - "ancaiserniafranziskanerimaringatlantajiris-a-conservativegascien" + - "tistordalfredrikstadtvscjohnsonfreeddnsfreebox-oscrapper-sitefre" + - "edesktoppdalfreemasonryfreemyiphosteurovisionfreesitefreetlscrap" + - "pingfreiburgfreseniusculturecreationfribourgfriuli-v-giuliafriul" + - "i-ve-giuliafriuli-vegiuliafriuli-venezia-giuliafriuli-veneziagiu" + - "liafriuli-vgiuliafriuliv-giuliafriulive-giuliafriulivegiuliafriu" + - "livenezia-giuliafriuliveneziagiuliafriulivgiuliafrlfroganscrysec" + - "uritytacticservehumourfrognfrolandfrom-akrehamnfrom-alfrom-arfro" + - "m-azimuthatogayabukijobservableusercontentrapaniizafrom-capetown" + - "news-stagingfrom-coffeedbackplaneappaviancargodaddyn-vpndnservei" + - "rchonanbulsan-suedtirolucernefrom-ctravelchannelfrom-dchoseikaru" + - "gamvikarlsoyfrom-deatnunjargafrom-flanderserveminecraftravelersi" + - "nsurancefrom-gaulardalfrom-hichisodegaurafrom-iafrom-idfrom-ilfr" + - "om-in-brbar1from-kservemp3from-kyowariasahikawafrom-langevagrige" + - "ntomologyeonggiehtavuoatnabudapest-a-la-masion-rancherkasydneyfr" + - "om-malselvendrelluciancrmadvancefrom-mdfrom-medizinhistorischese" + - "rvep2pfizerfrom-mifunefrom-mnfrom-modalenfrom-mservepicservequak" + - "efrom-mtnfrom-nctulanservesarcasmatartanddesignfrom-ndfrom-nefro" + - "m-nhktrdfrom-njservicesevastopolefrom-nminamiiseoullensvanguardf" + - "rom-nvallee-d-aosteigenfrom-nynysagaeroclubmedecincinnativeameri" + - "canantiquest-mon-blogueurodirumaceratabuseating-organichoshibuya" + - "habackyardsandvikcoromantovalle-daostavangerfrom-ohdattorelayfro" + - "m-oketogolffansevenassisicilyfrom-orfrom-padoval-daostavalleyfro" + - "m-pratogurafrom-ris-a-cpadualstackasumigaurayasudafrom-schoenbru" + - "nnfrom-sdscloudfrom-tnfrom-txn--1lqs03nfrom-utazurestaticappspac" + - "eusercontentrendhostingfrom-vald-aostarostwodzislawhoswholdingsm" + - "all-webredirectmeeresistancefrom-vtrentin-sud-tirolfrom-wafrom-w" + - "iardwebspacefrom-wvalleeaosteinkjerusalembroideryfrom-wyfrosinon" + - "efrostaplesewienfroyaitakahamalvikasuyanaizuerichardlillesandefj" + - "ordfruskydivingfstcgroupgfoggiafujiiderafujikawaguchikonefujimin" + - "okamoenairguardiannakadomarinebraskaunicommbankaszubyfujinomiyad" + - "attowebcampinashikiminohostfoldnavyfujiokayamamurogawafujisatosh" + - "onairlinedre-eikerfujisawafujishiroishidakabiratoridebianfujitsu" + - "rugashimandalfujiyoshidavvenjargap-northeast-3fukayabeatsharis-a" + - "-cubicle-slavellinodeobjectsharpharmacienshawaiijimarburgfukuchi" + - "yamadavvesiidappnodebalancertificationfukudomigawafukuis-a-democ" + - 
"ratrentin-sudtirolfukumitsubishigakirovogradoyfukuokazakiryuohku" + - "rafukuroishikarikaturindalfukusakisarazure-mobileirfjordfukuyama" + - "gatakaharussiafunabashiriuchinadazaifudaigojomedicinaharimalbork" + - "atowicefunagatakahashimamakishiwadafunahashikamiamakusatsumasend" + - "aisennangonohejis-a-designerfundaciofunkfeuerfuoiskujukuriyamang" + - "ooglecodespotrentin-sued-tirolfuosskoczowiiheyakumodernfurniture" + - "ggio-calabriafurubirafurudonostiaafurukawairportland-4-salernodu" + - "minamiizukaminokawanishiaizubangefusoftwarezzoologyfussagamihara" + - "futabayamaguchinomihachimanagementrentin-suedtirolfutboldlygoing" + - "nowhere-for-morenakasatsunairtelebitbridgestoneen-rootaruis-a-do" + - "ctorfuttsurugimperiafuturecmshellaspeziafuturehostingfuturemaili" + - "ngfvghangglidinghangoutsystemscloudsitehannanmokuizumodenakayama" + - "nnorth-kazakhstanhannorthflankautokeinotairestaurantrentino-a-ad" + - "igehanyuzenhapmircloudletshimojis-a-hard-workershimokawahappounz" + - "enharstadharvestcelebrationhasamansionshimokitayamattelekommunik" + - "ationhasaminami-alpshimonitayanagitappharmacyshimonosekikawahash" + - "banghasudahasura-apphdfcbankazohasvikazteleportlligatrentino-aad" + - "igehatoyamazakitahiroshimanxn--1lqs71dhatsukaichikaiseiyoichippu" + - "betsubetsugarusrcfastlylbanzaicloudappspotagerhattfjelldalhayash" + - "imamotobungotakadagestangeorgeorgiahazuminobusells-for-lesshimos" + - "uwalkis-a-hunterhelsinkitakamiizumisanofidelitysvardontexistmein" + - "-iservebeero-stagehembygdsforbundhemneshimotsukehemsedalhepforge" + - "blockshimotsumayfirstockholmestrandherokusslattuminamimakis-a-kn" + - "ightpointtohnoshoooshikamaishimodateheroyhgtvalleedaostehidorahi" + - "gashiagatsumagoianiahigashichichibunkyonanaoshimakanegasakilatir" + - "onthewifiatlassian-dev-myqnapcloudcontrolledogawarabikomaezakiru" + - "nogiftshinichinanhigashihiroshimanehigashiizumozakitakatakanezaw" + - "ahigashikagawahigashikagurasoedahigashikawakitaaikitakyushuaiahi" + - "gashikurumeetrentino-alto-adigehigashimatsushimaoris-a-landscape" + - "rugiahigashimatsuyamakitaakitadaitoigawahigashimurayamamotorcycl" + - "eshinjournalismailillehammerfeste-iphiladelphiaareadmyblogsytehi" + - "gashinarusells-for-ustkanmakitchenhigashinehigashiomitamamurausu" + - "kitamihamadahigashiosakasayamanakakogawahigashishirakawamatakaok" + - "almykiahigashisumiyoshikawaminamiaikitamotosumy-gatewayhigashits" + - "unorthwesternmutualhigashiurawa-mazowszexnetrentino-altoadigehig" + - "ashiyamatokoriyamanashifteditorxn--1qqw23ahigashiyodogawahigashi" + - "yoshinogaris-a-lawyerhiraizumisatohmapartmentshinjukumamotoyamas" + - "hikehirakatashinagawahiranairtrafficplexus-1hirarahiratsukaerutw" + - "entehirayakagehistorichouseshinkamigototalhitachiomiyagildeskali" + - "szhitachiotagophilatelyhitraeumtgeradegreehjartdalhjelmelandhole" + - "ckochikushinonsenergyholidayhomegoodshinshinotsurgeonshalloffame" + - "lbournehomeiphilipsynology-diskstationhomelinkyard-cloudjiffyres" + - "dalhomelinuxn--2m4a15ehomeofficehomesecuritymacaparecidahomesecu" + - "ritypchoyodobashichikashukujitawaravpagexlukowestus2homesenseeri" + - "nghomesklepphoenixn--2scrj9christiansburgrondarhomeunixn--30rr7y" + - "hondahongotembaixadahonjyoitakarazukaluganskazunoticeablevangerh" + - "ornindalhorsells-itrentino-s-tirolhortendofinternet-dnshinshiroh" + - "ospitalhoteleshintokushimahotelwithflightshintomikasaharahotmail" + - "hoyangerhoylandetroitskddiamondshinyoshitomiokamishihoronobeauxa" + - "rtsandcraftshiojirishirifujiedahumanitieshioyandexcloudhurdalhur" + - 
"umajis-a-liberalhyllestadhyogoris-a-libertarianhyugawarahyundaiw" + - "afuneis-uberleetrentino-sudtirolis-very-badajozis-a-painteractiv" + - "estfoldis-very-evillageis-very-goodyearis-very-niceis-very-sweet" + - "pepperis-with-thebandownloadisleofmanaustdaljenv-arubajddarchite" + - "cturealtorlandjeonnamerikawauejetztrentino-sued-tiroljevnakershu" + - "sdecorativeartshopwarendalenugjewelryjewishartgalleryjfkharkovao" + - "jgorajlljls-sto1jls-sto2jls-sto3jmphxn--32vp30hadselburgjnjaworz" + - "nosegawajoyentrentino-suedtiroljoyokaichibalashovhaebaruericsson" + - "gdalenviknakatsugawajpnjprshoujis-a-personaltrainerjurkoshimizum" + - "akiyosatokaizukamikoaniihamatamakawajimaritimoldell-ogliastrader" + - "koshunantankhmelnitskiyamarugame-hostyhostingkosugekotohiradomai" + - "nstitutekotourakouhokutamakiyosemitekounosupabasembokukiyosunnda" + - "lkouyamarylhurstjordalshalsenkouzushimasfjordenkozagawakozakizun" + - "okunimilitarykozowildlifestylekpnkppspdnshowtimeldalkrasnikahoku" + - "tokamachintaifun-dnsaliashwilliamhillkrasnodarkredstonekrelliank" + - "ristiansandcatsienarutolgakristiansundkrodsheradkrokstadelvalle-" + - "aostatic-accessigdalkryminamioguni5kumanotogawakumatorinotteroyk" + - "umejimashikis-a-republicancerresearchaeologicaliforniakumenantok" + - "igawakunisakis-a-rockstarachowicekunitachiarailwaykunitomigusuku" + - "leuvenetokashikis-a-socialistdlibestadkunneppuboliviajessheimper" + - "trixcdn77-secureggiocalabriakunstsammlungkunstunddesignkuokgroup" + - "ilotsilkhmelnytskyivaporcloudkuregruhostingkurgankurobeepilepsyk" + - "kylvenicekurogimimatakatoris-a-soxfankuroisogndalkuromatsunais-a" + - "-studentalkurotakikawasakis-a-teacherkassyno-dshiraois-a-linux-u" + - "sershirahamatonbetsurgerykushirogawakustanais-a-techietis-a-llam" + - "archeapigeelvinckfh-muensterkusuppliesimple-urlkutchanelverumina" + - "misanrikubetsupplykutnovecoregontrailroadkuzumakis-a-therapistoi" + - "akvafjordkvalsundkvamlidlugolekafjordvagsoygardendoftheinterneth" + - "nologykvanangenkvinesdalkvinnheradkviteseidatingkvitsoykwpspectr" + - "uminamitanekzmisakis-an-accountantshiraokamitondabayashiogamagor" + - "iziamisasaguris-an-actormisawamisconfusedmishimassivegridmissile" + - "wismillermisugitokorozawamitourismilezajskhplaystation-cloudyclu" + - "stermitoyoakemiuramiyazurecontainerdpolicemiyotamanomjondalenmlb" + - "fanmontrealestatefarmequipmentrentinoa-adigemonza-brianzapposird" + - "almonza-e-della-brianzaptokuyamasudamonzabrianzaramonzaebrianzam" + - "onzaedellabrianzamoonscaleforcemordoviamoriyamatsumaebashikshack" + - "netrentinoaadigemoriyoshiminamiashigaramormonstermoroyamatsumoto" + - "fukemortgagemoscowinbarclaycards3-external-1moseushistorymosjoen" + - "moskeneslupsklabudhabikinokawabarthagakhanamigawamosslzmosvikmps" + - "pbar2moteginowaniigatakahatakaishimogosenmoviemovimientokyotango" + - "uvichungnamdalseidfjordynathomebuiltwithdarkarmoymozilla-iotrent" + - "inoalto-adigemtranbymuginozawaonsenmuikaminoyamaxunispacemukoebe" + - "nhavnmulhouservegame-servermunakatanemuncienciamuosattemupimient" + - "aketomisatomobelementoraymurmansknx-serversicherungmurotorcraftr" + - "entinoaltoadigemusashinoharamuseetrentinos-tirolmuseumvereniging" + - "musicarbonia-iglesias-carboniaiglesiascarboniamutsuzawamy-vigorg" + - "emy-wanggoupilemyactivedirectorymyasustor-elvdalmycdmycloudnsmol" + - "arvikomaganemydattolocalhistorymyddnsgeekgalaxymydissentrentinos" + - "tirolmydobisshikis-an-actresshiratakahagithubpreviewsaikisofukus" + - "himangyshlakasamatsudopaashishikuis-a-musicianmydroboehringerike" + - 
"mydsmushcdn77-sslingmyeffectrentinosud-tirolmyfastblogermyfirewa" + - "llonieruchomoscienceandindustrynmyforuminamiuonumasoymyfritzmyft" + - "paccessnoasakakinokiamyhome-servermyjinomykolaivarggatrentinosud" + - "tirolmymailermymediapchurchaseljeepsondriodejaneirodoymyokohamam" + - "atsudamypepinbarclays3-fips-us-gov-west-1mypetsokndalmyphotoshib" + - "alatinombrendlyngeniwaizumiotsukumiyamazonawsglobalacceleratorah" + - "imeshimabaridagawakuyachimataikikonaikawachinaganoharamcoachampi" + - "onshiphoptobamadridnbloggerimo-siemenscaledekaasdaburmypictureso" + - "larssonmypsxn--3ds443gmysecuritycamerakermyshopblocksolognemysho" + - "pifymyspreadshoppingmythic-beastsolundbeckomakiyokawaramytis-a-b" + - "ookkeeperspectakasakitashiobaramytuleap-partnersomamyvncircustom" + - "er-ocimdbananarepublic66mywireitrentinosued-tirolplatterpioneerp" + - "lazaplcube-serverplumbingoplurinacionalpodhalepodlasiellaktyubin" + - "skiptveterinairealmpmnpodzonepohlpoivronpokerpokrovskommunalforb" + - "undpoliticarrdpolitiendapolkowicepoltavalle-d-aostaticsopotrenti" + - "nosuedtirolpomorzeszowindmillponpesaro-urbino-pesarourbinopesaro" + - "masvuotnaritakoelnponypordenonepornporsangerporsangugeporsgrunna" + - "nyokoshibahikariwanumatakinouepoznanpraxis-a-bruinsfanprdpresidi" + - "oprgmrprimetelemarkommuneprincipeprivatizehealthinsuranceprofesi" + - "onalprogressivestnesor-odalpromombetsupportrentinsud-tirolproper" + - "typrotectionprotonetrentinsudtirolprudentialpruszkowindowskrakow" + - "innersor-varangerprvcyberlevagangaviikanonjis-an-artistgstageprz" + - "eworskogpugliapulawypupippueblockbusterniiminamiawajikis-an-anar" + - "chistoricalsocietypvhagebostadpvtrentinsued-tirolpwcistrondheimm" + - "obilienissayokkaichiropractichernovtsyncloudurbanamexhibitioniss" + - "andiegomutashinainterhostsolutionsandoypythonanywherepaircraftin" + - "gvollolipopittsburghofficialpzqldqotoyohashimotoolsorfoldqponiat" + - "owadaqslgbtrentinsuedtirolqualifioappiwatequickconnectrentoyonak" + - "agyokutoyakolobrzegersundquicksytesorocabalestrandabergamoarekey" + - "machineustargardquipelementsorreisahayakawakamiichikawamisatotto" + - "ris-an-engineeringqvcitadeliverydyndns-at-homedepotenzamamidorit" + - "togitsumidatlantichirurgiens-dentistes-en-franceswidnikkokonoesw" + - "iebodzin-dsldswiftcoverswinoujscienceandhistoryswissmarterthanyo" + - "usynology-dsouthwest1-uslivinghistorytushuissier-justicetuvallea" + - "ostaverntuxfamilytwmailvevelstadvibo-valentiavibovalentiavideovi" + - "llasphinxn--3e0b707evinnicasacamdvrcampinagrandebuilderschmidtre" + - "-gauldalvinnytsiavipsinaappixolinovirginiavirtual-userveexchange" + - "virtualcloudvirtualservervirtualuserveftpizzavirtueeldomein-vigo" + - "rlicevirtuelvisakegawaviterboknowsitallvivolkenkundenvixn--3hcrj" + - "9civilisationisshingucciprianidyndns-at-workisboringrongausdalut" + - "skarpaczeladzvlaanderenvladikavkazimierz-dolnyvladimirvlogintoyo" + - "nezawavminanovologdanskongsvingervolvolkswagentspjelkavikomorots" + - "ukagawavolyngdalvoorlopervossevangenvotevotingvotoyonovps-hostro" + - "wiecivilizationiyodogawawithyoutuberspacekitagatargetmyipkomforb" + - "arcelonagawalbrzycharternopilawalesundgcanonoichinomiyakehimejib" + - "igawaustraliamuneues3-ap-south-1wiwatsukiyonortonwixsitewloclawe" + - "konskowolayangroupiemontewmcloudwmflabsrhtrevisojamisonwnextdire" + - "ctromsakatakkofuefukihabororosowawoodsideloittevaksdalworse-than" + - "dawowiospydebergwpdevcloudwpenginepoweredwphostedmailwpmucdnplan" + - "tsomnarviikamiokameokamakurazakitaurawpmudeveloperauniterois-cer" + - 
"tifiedunetlifyis-a-nascarfanwritesthisblogwroclawitdkoninjambylw" + - "tcirclerkstagets-itromsokamogawawtfastvps-serveronakanotoddenwuo" + - "zuwzmiuwajimaxn--45brj9civilwarmiasakuchinotsuchiurakawatchandcl" + - "ockaruizawaxn--45q11clanbibaidarmeniaxn--4gbriminingxn--4it168dx" + - "n--4it797konyvelohmusashimurayamarylandxn--4pvxs4allxn--54b7fta0" + - "ccldmailuxuryxn--55qw42gxn--55qx5dxn--5js045dxn--5rtp49cleverapp" + - "stmnxn--5rtq34kooris-a-photographerokuapphotographysioxn--5su34j" + - "936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn" + - "--7t0a264clic20001wwwfarmsteadyndns-blogdnsanfranciscofreakunemu" + - "rorangecloudplatform0xn--80aaa0cvacationsrlxn--80adxhksrvaroyxn-" + - "-80ao21axn--80aqecdr1axn--80asehdbarefootballooningjovikaraganda" + - "ustrheimatunduhrennesoyokosukanramusementdllclstagehirnrtatarant" + - "ours3-ap-southeast-1xn--80aswgxn--80augustowithgoogleapiszxn--8l" + - "tr62kopervikhersonxn--8pvr4uxn--8y0a063axn--90a1aflakstadaokagak" + - "icks-assnasaarlandxn--90a3academiamicable-modemoneyxn--90aeropor" + - "talaheadjudaicadaquestorebaselectritonxn--90aishobarakawagoexn--" + - "90amcdirxn--90azhytomyravendbargainstancempresashibetsukuibmdisc" + - "ourses3-sa-east-1xn--9dbhblg6dietrusteexn--9dbq2axn--9et52uxn--9" + - "krt00axn--andy-iraxn--aroport-byaotsurnadalxn--asky-iraxn--aursk" + - "og-hland-jnbarreauctionfabricafjs3-us-east-2xn--avery-yuasakuhok" + - "ksundxn--b-5gaxn--b4w605ferdxn--balsan-sdtirol-nsbstorfjordxn--b" + - "ck1b9a5dre4clicketcloudcontrolappgafanxn--bdddj-mrabdxn--bearalv" + - "hki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--b" + - "idr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyasakaiminatoyookana" + - "zawaxn--bjddar-ptarnobrzegyptianxn--blt-elabourxn--bmlo-grainger" + - "xn--bod-2natalxn--bozen-sdtirol-2obanazawaxn--brnny-wuacademy-fi" + - "rewall-gatewayxn--brnnysund-m8accident-investigation-aptibleadpa" + - "gesquare7xn--brum-voagatrvestre-slidreportrogstadxn--btsfjord-9z" + - "axn--bulsan-sdtirol-nsbarrel-of-knowledgeappleborkarasjohkameyam" + - "atotakadauthgear-stagingjesdalimanowarudaukraanghkembuchikumagay" + - "agawakepnoipirangalsaceobiragrocerybnikeisenbahnatuurwetenschapp" + - "engineastcoastaldefenceastus2xn--c1avgxn--c2br7gxn--c3s14mincomc" + - "astresindevicenzaporizhzhiaxn--cck2b3barrell-of-knowledgecompute" + - "rhistoryofscience-fictionredumbrellaakesvuemieleccemrxn--cckwcxe" + - "tdxn--cesena-forl-mcbremangerxn--cesenaforl-i8axn--cg4bkis-found" + - "ationxn--ciqpnxn--clchc0ea0b2g2a9gcdxn--comunicaes-v6a2oxn--corr" + - "eios-e-telecomunicaes-ghc29axn--czr694barsycenterprisesakikuchik" + - "useihicampobassociatest-iservecounterstrikeu-1xn--czrs0try-snowp" + - "lowiczest-le-patronxn--czru2dxn--czrw28barsyonlinewhampshirealty" + - "dalpha-myqnapcloud66xn--d1acj3basicservercelliguriauthgearappspa" + - "cehosted-by-previderhclouddnslivefsnillfjorddnss3-ca-central-1xn" + - "--d1alfaromeoxn--d1atrycloudflareplantationxn--d5qv7z876clickris" + - "inglesangoxn--davvenjrga-y4axn--djrs72d6uyxn--djty4koryokamikawa" + - "nehonbetsurutaharaxn--dnna-grajewolterskluwerxn--drbak-wuaxn--dy" + - "ry-iraxn--e1a4clinichitachinakagawassamukawatarikuzentakatairave" + - "nnagasakimobetsuldaluccaravantaarparmatsuuraxn--eckvdtc9dxn--efv" + - "n9storjcloud-ver-jpchristmasakinderoyxn--efvy88haibarakitahataka" + - "nabeautysfjordxn--ehqz56nxn--elqq16hair-surveillancexn--eveni-0q" + - "a01gaxn--f6qx53axn--fct429kosaigawaxn--fhbeiarnxn--finny-yuaxn--" + - "fiq228c5hstpetersburgxn--fiq64basilicataniauthordalandds3-eu-cen" + - 
"tral-1xn--fiqs8streamscompute-1xn--fiqz9studioxn--fjord-lraxn--f" + - "jq720axn--fl-ziaxn--flor-jraxn--flw351exn--forl-cesena-fcbsstudy" + - "namisches-dnsortlandxn--forlcesena-c8axn--fpcrj9c3dxn--frde-gran" + - "drapidstuff-4-salexn--frna-woaraisaijosoyrovigotsukisosakitagawa" + - "xn--frya-hraxn--fzc2c9e2cliniquedapliernewyorkshirebungoonordest" + - "e-idchitosetoeigersundurhamburgroks-thisayamanobeokakegawaxn--fz" + - "ys8d69uvgmailxn--g2xx48clintonoshoesanjotelulubindaluzernxn--gck" + - "r3f0fauskedsmokorsetagayaseralingenoamishirasatogliattis-a-celti" + - "csfanxn--gecrj9clothingdustdatadetectksatxn--12c1fe0bradescotlan" + - "dyndns-freeboxosascoli-picenordkapparochernigovernmentlon-2xn--g" + - "gaviika-8ya47hakatanorfolkebibleksvikatsushikabeebyteapplinzis-a" + - "-financialadvisor-aurdalxn--gildeskl-g0axn--givuotna-8yasugitlab" + - "orxn--gjvik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050is-gonexn--g" + - "mqw5axn--gnstigbestellen-zvbrplsbxn--3oq18vl8pn36axn--gnstiglief" + - "ern-wobihirosakikamijimatsusakahoginankokubunjis-into-animegurow" + - "nproviderxn--h-2failxn--h1aeghakodatexn--h1ahnxn--h1alizxn--h2br" + - "eg3evenestufftoread-booksnesoruminamiyamashirokawanabelaudibleas" + - "ingxn--h2brj9c8cn-northwest-1xn--h3cuzk1discountysnestuttgartroa" + - "ndinosaurepbodynamic-dnsoundcastronomy-routerxn--hbmer-xqaxn--hc" + - "esuolo-7ya35basketballfinanzgorzeleccogladedyn-berlincolnavigati" + - "onavoizumizakiitatebayashiibahccavuotnagaraholtalenglandiscovery" + - "ggeeu-2xn--hery-iraxn--hgebostad-g3axn--hkkinen-5waxn--hmmrfeast" + - "a-s4accident-prevention-k3surreyxn--hnefoss-q1axn--hobl-iraxn--h" + - "oltlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54" + - "axn--i1b6b1a6a2exn--imr513nxn--indery-fyasuokannamiharuxn--io0a7" + - "is-into-carshisohugheshisuifuelluciancrmrecruitrentino-stirolxn-" + - "-j1adplatformshangrilaquilanxessooxn--j1aefbsbxn--1ck2e1bar0emma" + - "fann-arboretumbriamallamaceiobbcg12038xn--j1ael8batochiokinoshim" + - "aizurugbyglandroverhallahppiacenzachpomorskienavuotnapleskns3-us" + - "-gov-west-1xn--j1amhakonexn--j6w193gxn--jlq480n2rgxn--jlq61u9w7b" + - "atsfjordishakotanayorovnobserverxn--jlster-byatominamidaitomanch" + - "esterxn--jrpeland-54axn--jvr189miniserverxn--k7yn95exn--karmy-yu" + - "axn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dx" + - "n--kltp7dxn--kltx9axn--klty5xn--3pxu8konsulatrobeeldengeluidvare" + - "servdxn--koluokta-7ya57hakubahcavuotnagaivuotnagaokakyotambabydg" + - "oszczecinemagnetnedalipaywhirlxn--kprw13dxn--kpry57dxn--kput3is-" + - "into-cartoonshitaramaxn--krager-gyatsukanoyaltakashimarnardalxn-" + - "-kranghke-b0axn--krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49j" + - "devcloudfunctionshizukuishimofusaitoshimatsuzakis-a-patsfanxn--k" + - "snes-uuaxn--kvfjord-nxaxn--kvitsy-fyatsushiroxn--kvnangen-k0axn-" + - "-l-1fairwindsusakis-an-entertainerxn--l1accentureklamborghinikol" + - "aeventsusonoxn--laheadju-7yawaraxn--langevg-jxaxn--lcvr32dxn--ld" + - "ingen-q1axn--leagaviika-52bauhauspostman-echocolatemasekd1xn--le" + - "sund-huaxn--lgbbat1ad8jdfaststacksaves-the-whalessandria-trani-b" + - "arletta-andriatranibarlettaandriaxn--lgrd-poacctrysiljanxn--lhpp" + - "i-xqaxn--linds-pramericanartunespeedpartnerxn--lns-qlavagiskexn-" + - "-loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-liacngrossetouchih" + - "ayaakasakawaharaxn--lten-granexn--lury-iraxn--m3ch0j3axn--mely-i" + - "raxn--merker-kuaxn--mgb2ddesuzakanagawaxn--mgb9awbfbx-oslocuscou" + - "ntryestateofdelawareclaimsavonarusawaxn--mgba3a3ejtunkongsbergxn" + - 
"--mgba3a4f16axn--mgba3a4fra1-deltaitogakushimotoganexn--mgba7c0b" + - "bn0axn--mgbaakc7dvfbxostrowwlkpmguovdageaidnulvikashiwazakiwielu" + - "nnerxn--mgbaam7a8hakuis-a-geekatsuyamarshallstatebankaufentigerx" + - "n--mgbab2bdxn--mgbah1a3hjkrdxn--mgbai9a5eva00bellunord-aurdalvda" + - "laskanittedallasalleangaviikadenagahamaroyerxn--mgbai9azgqp6jeju" + - "niperxn--mgbayh7gpalermomahachijolsterxn--mgbbh1a71exn--mgbc0a9a" + - "zcgxn--mgbca7dzdoxn--mgbcpq6gpa1axn--mgberp4a5d4a87gxn--mgberp4a" + - "5d4arxn--mgbgu82axn--mgbi4ecexposedxn--mgbpl2fhskypexn--mgbqly7c" + - "0a67fbcnpyatigorskolecznagasukexn--mgbqly7cvafr-1xn--mgbt3dhdxn-" + - "-mgbtf8flapymnturystykaneyamazoexn--mgbtx2beneventodayombolzano-" + - "altoadigeologyomitanoceanographiqueu-3xn--mgbx4cd0abbvieeexn--mi" + - "x082fedorainfraclouderaxn--mix891fedorapeoplegallodingenxn--mjnd" + - "alen-64axn--mk0axin-the-bandais-into-gamessinazawaxn--mk1bu44cns" + - "annanxn--mkru45is-leetrentino-sud-tirolxn--mlatvuopmi-s4axn--mli" + - "-tlavangenxn--mlselv-iuaxn--moreke-juaxn--mori-qsakuragawaxn--mo" + - "sjen-eyawatahamaxn--mot-tlazioxn--mre-og-romsdal-qqbuseranishiar" + - "itakurashikis-lostre-toteneis-a-nursellsyourhomeftphonefosshirak" + - "okamiminershiranukamisunagawaxn--msy-ula0hakusanagochijiwadefini" + - "mamateramochizukinkobierzycexn--mtta-vrjjat-k7afedoraprojectozsd" + - "elmenhorstalbansaxoxn--muost-0qaxn--mxtq1minisitexn--ngbc5azdxn-" + - "-ngbe9e0axn--ngbrxn--41axn--nit225kosakaerodromegallupaascolipic" + - "eno-ipifony-1xn--nmesjevuemie-tcbalsan-sudtirollagdenesnaaseinet" + - "-freaksuzukananiikappudoxn--nnx388axn--nodessakurais-not-certifi" + - "edxn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--ntter" + - "y-byaeservehalflifeinsurancexn--nvuotna-hwaxn--nyqy26axn--o1ache" + - "rnihivgubsuzukis-bytomaritimekeepingxn--o3cw4haldenxn--o3cyx2axn" + - "--od0algxn--od0aq3bentleyonagoyautomotivelandeportexasnesoddenma" + - "rkhangelskjakdnepropetrovskiervaapsteiermarkanzakiwakunigamihokk" + - "aidovre-eikerxn--ogbpf8flatangerxn--oppegrd-ixaxn--ostery-fyaxn-" + - "-osyro-wuaxn--otu796dxn--p1acfeiraquarelleaseeklogeschokokekscho" + - "koladenxn--p1ais-savedxn--pgbs0dhlx3xn--porsgu-sta26fermockasser" + - "versaillescholarshipschoolsztynsettsurfashionxn--pssu33lxn--pssy" + - "2uxn--q7ce6axn--q9jyb4cntmparsannohelplfinancialvivanovoldaxn--q" + - "cka1pmckinseyxn--qqqt11minnesotaketakayamassa-carrara-massacarra" + - "ramassabusinessebykleclerchromediatechnologyxn--qxa6axn--qxamste" + - "rdamnserverbaniaxn--rady-iraxn--rdal-poaxn--rde-ulaxn--rdy-0naba" + - "ris-slickhakassiaxn--rennesy-v1axn--rhkkervju-01aferraraxn--rhol" + - "t-mragowoltlab-democraciaxn--rhqv96gxn--rht27zxn--rht3dxn--rht61" + - "exn--risa-5naturalhistorymuseumcenterxn--risr-iraxn--rland-uuaxn" + - "--rlingen-mxaxn--rmskog-byaxn--rny31halsaitamatsukuris-a-greenxn" + - "--rovu88beppublishproxyonagunicloudiskussionsbereichattanooganor" + - "ddalimitedisrechtranakaiwamizawawsmpplanetariumcpreservationfsho" + - "strodawarautoscanadaeguambulancentralus-2xn--rros-granvindafjord" + - "xn--rskog-uuaxn--rst-0naturalsciencesnaturellesvalbardunloppacif" + - "icitichiryukyuragifuchungbukharaumalatvuopmicrosoftbankariyamein" + - "forumzxn--rsta-framercanvasvcivilaviationissedaluroyxn--rvc1e0am" + - "3exn--ryken-vuaxn--ryrvik-byaxn--s-1faithammarfeastafricapitalon" + - "ewspaperxn--s9brj9collectionxn--sandnessjen-ogbeskidyn-ip24lima-" + - "cityeatselinogradimo-i-rana4u2-localhostrolekaniepce12hpalmasera" + - "ti234xn--sandy-yuaxn--sdtirol-n2axn--seral-lraxn--ses554gxn--sgn" + - 
"e-graphoxn--42c2d9axn--skierv-utazasveioxn--skjervy-v1axn--skjk-" + - "soaxn--sknit-yqaxn--sknland-fxaxn--slat-5naturbruksgymnxn--slt-e" + - "labcieszynh-serveblogspotaribeiraogakibichuoxn--smla-hraxn--smna" + - "-gratangentlentapisa-geekoseis-a-playershiftcryptonomichigangwon" + - "xn--snase-nraxn--sndre-land-0cbestbuyshouses3-us-west-1xn--snes-" + - "poaxn--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn" + - "--sr-varanger-ggbetainaboxfusejnyanagawalmartattoolforgerockartu" + - "zyoriikarasjokarasuyamarriottaxihuanhlfanhs3-us-west-2xn--srfold" + - "-byaxn--srreisa-q1axn--srum-gratis-a-bulls-fanxn--stfold-9xaxn--" + - "stjrdal-s1axn--stjrdalshalsen-sqbhzcasadelamonedanceu-4xn--stre-" + - "toten-zcbieidskoguchikuzenikonanporocelotenkawaveroykenflfanpach" + - "igasakids3-eu-west-1xn--t60b56axn--tckwebthingsvelvikomvuxn--3bs" + - "t00minamiminowaxn--tiq49xqyjelasticbeanstalkharkivanylvenneslask" + - "errylogisticshizuokamitsuexn--tjme-hraxn--tn0agrinetbankosherbro" + - "okegawaxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trentin-sd-tir" + - "ol-rzbielawaltervistainaioirasebastopologyeongnamegawafaicloudin" + - "eat-urlimoliseminevje-og-hornnes3-website-ap-northeast-1xn--tren" + - "tin-sdtirol-7vbiellair-traffic-controlleyoshiokanumazuryukiiyama" + - "nouchikuhokuryugasakitanakagusukumodumeloyalistoragextraspace-to" + - "-rentalstomakomaibaravocatanzarowbq-aurskog-holandingivingjemnes" + - "3-ap-southeast-2xn--trentino-sd-tirol-c3bieszczadygeyachiyodaeje" + - "onbukcoalwaysdatabaseballangenkainanaejrietisalatinabenonicbcn-n" + - "orth-1xn--trentino-sdtirol-szbievat-band-campaniavoues3-eu-west-" + - "2xn--trentinosd-tirol-rzbifukagawashingtondclk3xn--trentinosdtir" + - "ol-7vbigv-infolldalivornow-dns3-website-ap-southeast-1xn--trenti" + - "nsd-tirol-6vbihorologyukuhashimoichinosekigaharaxarnetflixilovec" + - "ollegefantasyleaguernseyokotebetsuikirkenes3-eu-west-3utilities-" + - "1xn--trentinsdtirol-nsbikedaemonmoutheworkpccweddinglassassinati" + - "onalheritagexn--trgstad-r1axn--trna-woaxn--troms-zuaxn--tysvr-vr" + - "axn--uc0atvestre-totennishiawakuraxn--uc0ay4axn--uist22hamurakam" + - "igoris-a-gurunusualpersonxn--uisz3gxn--unjrga-rtarumizusawaxn--u" + - "nup4yxn--uuwu58axn--vads-jraxn--valle-aoste-ebbtuscanyxn--valle-" + - "d-aoste-ehbodoes-itcouldbeworldxn--valleaoste-e7axn--valledaoste" + - "-ebbvadsoccertmgrazerbaijan-mayengerdalcesvizzeraxn--vard-jraxn-" + - "-vegrshei-c0axn--vermgensberater-ctbitsvn-reposouthcarolinarviko" + - "monoticiashowaxn--vermgensberatung-pwblogoiplatter-appinkomatsus" + - "himarumorimachidaxn--vestvgy-ixa6oxn--vg-yiabkhaziaxn--vgan-qoax" + - "n--vgsy-qoa0jelenia-goraxn--vgu402colognexus-3xn--vhquvestvagoyx" + - "n--vler-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq86" + - "1bilbaokinawashirosatobishimagentositecnologiazurewebsiteshikaga" + - "miishibukawakkanaibetsubamericanfamilydsmynasushiobarackmazeplay" + - "okozebinagisochildrensgardenaval-d-aosta-valleyolasitebinordland" + - "evelopmentatsunobninskaracoldwarszawaustevollillyboltateshinanom" + - "achimkentateyamajudygarlanddnskingitpagefrontappalmspringsakeret" + - "rosnubaltimore-og-romsdalp1xn--w4r85el8fhu5dnraxn--w4rs40lxn--wc" + - "vs22dxn--wgbh1colonialwilliamsburgroundhandlingroznyxn--wgbl6axn" + - "--xhq521billustrationrenderxn--xkc2al3hye2axn--xkc2dl3a5ee0hands" + - "onyxn--y9a3aquariumintereisenxn--yer-znaturhistorischeswidnicart" + - "oonartdecologiaxn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn--45br5" + - "cylxn--ystre-slidre-ujbiocpanamatsushigexn--zbx025dxn--zf0ao64ax" + - 
"n--zf0avxlxn--zfr164bipanasonicasertaipeidsvollombardiadembetsuk" + - "ubankaratexnbayxz" + "bservercellikes-piedmonticellocalzoneastasiaetnaamesjevuemielnod" + + "umcpeastcoastaldefenceastus2038birdartcenterprisecloudaccesscamb" + + "ridgeiseiroumuenchenishiazaindielddanuorrindigenamsosnowiecherni" + + "vtsiciliabirkenesoddtangenovaragusarts3-website-eu-west-1birthpl" + + "acebitbucketrzynishigovtatsunocelotenkawabjarkoyoshiokanumazuryu" + + "kindowapblogsiteleafamilycompany-2bjerkreimbaltimore-og-romsdalp" + + "ha-myqnapcloud66bjugnieznorddalombardynalias3-website-sa-east-1b" + + "lackfridayukuhashimoichinosekigaharabloombergbauernishiharabloxc" + + "ms3-website-us-east-1bluebitemasekd1bmoattachments3-website-us-w" + + "est-1bms3-website-us-west-2bmweeklylotteryurihonjournalistjohnis" + + "hiizunazukindustriabnrwegroweibolognagareyamakeupowiathletajimag" + + "eandsoundandvision-riopretochigiftsalangenishikatakatsukindustri" + + "esteamfamberkeleyusuharabomloabaths-heilbronnoysundivttasvuotnak" + + "aniikawatanagurabondigitaloceanspacesalon-1bonnishikatsuragit-re" + + "posts-and-telecommunicationsaltdalomzaporizhzhegurinfinitinsureg" + + "ruhostingloboavistanbulsan-sudtirolondonetskaratsuginamikatagami" + + "hokkaidovre-eikerbookinghostedpictetnedalondrinamsskoganeintelli" + + "gencebookonlinewjerseyusuisservegame-serverboomlajollamericanexp" + + "ressexyuufcfanishikawazukamisatokaizukameyamatotakadaboschaeffle" + + "rdalorenskoglogoweirbostik-serveronagasakikuchikuseihicampobasso" + + "ciatest-iservecounterstrikebostonakijinsekikogentappsselfiparach" + + "utingloppenzaolbia-tempio-olbiatempioolbialystokkeliwebhostinglu" + + "gsjcbnpparibashkiriabotanicalgardeno-stagingmbhartipschlesisches" + + "aludiyuzawabotanicgardenishimerabotanychernovtsyncloudrangedalot" + + "tokorozawabouncemerckmsdnipropetrovskjervoyageometre-experts-com" + + "ptablesalvadordalibabalena-devicesalzburgminakamichiharabounty-f" + + "ullensakerrypropertiesamegawaboutiquebecommerce-shopitsitemp-dns" + + "watch-and-clockerboutireserve-onlinewmexicodyn-o-saurlandesamnan" + + "gerbozen-sudtirolouvreisenishinomiyashironocparaglidingmodelling" + + "mxboxfordelmenhorstalbansampaleoddabozen-suedtirolpusercontentat" + + "toolforgerockartuzybplaceducatorprojectaxihuanishinoomotegohtawa" + + "ramotoineppubtlsamsclubartowellbeingzonebrandywinevalleybrasilia" + + "bresciabrindisibenikikugawashtenawdevcdnaccessobetsuitagajobserv" + + "ableusercontentcmeloyalistoragebristoloseyouriparisor-fronishino" + + "shimatsumotofukebritishcolumbialowiezaganquannefrankfurtcp4broad" + + "castlebtimnetzlgretakaharussiabroadwaybroke-itvedestrandray-dnst" + + "racebrokerbrothermesaverdealerbrowsersafetymarketsamsungrimstadr" + + "ayddns5ybrumunddalublindesnesandnessjoenishiokoppegardraydnsupda" + + "terbrunelastxenishitosashimizunaminamibosognebrusselsandoybruxel" + + "lesandvikcoromantovalle-daostavangerbryanskodjedugit-pagespeedmo" + + "bilizeroticagliaricoharuhrbrynewportgorybuskerudrobaknoluoktachi" + + "kawafflecellclstagehirnishiwakinterhostsolutionsanfranciscofreak" + + "unekobayashikaoirmembersangomniweatherchannelucaniabuzentsujiieb" + + "uzzwesteuropenairbusantiquest-a-la-maisondre-landroidrrbwestfale" + + "nissandiegomurabzhitomirbzzcoloradoplateaudiopsysantacruzsantafe" + + "djeffersoncolumbusheycommunecommunity-prochowicecomobaranzancomp" + + "aremarkerryhotelsantamariakecomsecaaskoyabearalvahkievennodesaba" + + "erobaticketsantoandreamhostersanukintuitjxjavaldaostathellevange" + + 
"rcondoshichinohealth-carereformemergencyahabaghdadultkmaxxn--0tr" + + "q7p7nnconferenceconstructionconsuladogadollsaobernardoconsultant" + + "hropologyconsultingrossetouchihayaakasakawaharacontactksatxn--11" + + "b4c3dyndns-blogdnsaogoncarriercontagematsubaraumalatvuopmicrosof" + + "tbankasaokamikoaniihamatamakawajimaritimodumemorialcontemporarya" + + "rteducationalchikugodonnagatorogersvp4contractorskenconventuresh" + + "inodearthruherecipescaracalvinklein-berlindaskvollcookingchannel" + + "sdvrdnsdojoetsuwanouchikujogaszkolancashireclaimsaotomeiwamashik" + + "okuchuocoolcooperativano-frankivskygearapparochernigovernmentlon" + + "-2copenhagencyclopedichitosetoeidsvollucernecoproductionsapporoc" + + "orporationcorsicahcesuoloansardegnaroycorvettempurlcosenzakopane" + + "lblagrarchaeologyeongbuk0cosidnsfor-better-thanawatchandclockash" + + "ibatakasakiwakunigamilanotairestaurantmparsardiniacostumedicalta" + + "nissettaipeigersundyndns-freeboxosascoli-picenordlandyndns-homed" + + "nsarlcouchpotatofriesarpsborgroundhandlingroznycoukashiharacounc" + + "ilcouponsarufutsunomiyawakasaikaitabashijonawatecozoravennaharim" + + "alborkashiwaracqcxn--12c1fe0bradescotlandyndns-ipartinuyamashina" + + "tsukigatakaokalmykiacranbrookuwanalyticsxn--12cfi8ixb8lcrdyndns-" + + "mailcreditcardyndns-office-on-the-webercreditunioncremonashgabad" + + "addjaguarqhachinohedmarkashiwazakiwielunnercrewfarsundyndns-pics" + + "asayamatta-varjjatoyosatoyokawacricketoyotapartsasebofagemologic" + + "allynxn--12co0c3b4evalled-aostakinouecrimeast-kazakhstanangercro" + + "tonecrownipartycrsaskatchewancruisesassarinvestmentsaudacuisinel" + + "lancasterculturalcentertainmentoyotomiyazakinzais-a-candidatecun" + + "eocupcakecuritibackyardsauheradyndns-remotewdyndns-serverdalcurv" + + "alledaostakkokonoecymruovatmallorcafederation-webpaashorokanaiec" + + "yonabarumemsettlersavannahgacyouthachiojiyaitakahashimamakisosak" + + "itagawaferraraferrarivneferrerotikagoshimalopolskanlandyndns-wik" + + "irafetsundyndns-workshoparenakanojohanamakinoharafgujoinvilleitu" + + "ngsenfhvalerfidoomdnsiskinkyotobetsulikescandyn53fieldyndns1figu" + + "eresinstagingulenfilateliafilegear-audnedalnfilegear-dealstahaug" + + "esunderseaportsinfolionetworkangerfilegear-gbizfilegear-iefilege" + + "ar-jpmorganfilegear-sg-1filminamifuranofinalfinancefineartschule" + + "finlandynnsaveincloudyndns-webhareidsbergentingrpasadenarashinof" + + "innoyfirebaseappassenger-associationfirenetoyourafirenzefireston" + + "efirewebhopocznordreisa-hockeynutazurestaticappspaceusercontento" + + "ystre-slidrettozawafirmdalegoldpoint2thisamitsukefishingolffansc" + + "hulserverfitjarvodkagaminogiessennanjobojis-a-catererfitnessettl" + + "ementozsdeloittenrissagaeroclubmedecincinnativeamericanantiquest" + + "-mon-blogueurodirumaceratabitorderimo-siemenscaledekaascolipicen" + + "oboribetsuckschwarzgwangjuifminamiiserniafjalerfldrvallee-aoster" + + "oyflekkefjordynservebbsaves-the-whalessandria-trani-barletta-and" + + "riatranibarlettaandriaflesbergunmaniwakurateflickragerokunohealt" + + "hcareerschweizflirfloginlinefloraflorencefloridatsunangojomedici" + + "nakaiwamizawatchesciencecentersciencehistoryfloripaderbornfloris" + + "tanohataitogliattis-a-celticsfanfloromskoguovdageaidnulvikasukab" + + "edzin-addrammenuorochesterflowerscientistordalfltrani-andria-bar" + + "letta-trani-andriaflynnhosting-clusterfndynulmetacentrumeteorapp" + + "assagensavonarusawafnwkasumigaurayasudafoodnetworkdalfor-ourfor-" + + "somedio-campidano-mediocampidanomediofor-theaterforexrothachirog" + + 
"atakahatakaishimogosenforgotdnscjohnsonforli-cesena-forlicesenaf" + + "orlillehammerfeste-ipatriaforsaleikangerforsandasuoloftraniandri" + + "abarlettatraniandriafortalfortexascrapper-sitefortmissoulanciafo" + + "rtworthadanorfolkebibleluxembourgushikamifuranore-og-uvdalfosnes" + + "crappingwiddleksvikasuyanaizuerichardlillyfotranoyfoxafozfranami" + + "zuhobby-sitextileirfjordynv6francaiseharafranziskanerimaringatla" + + "ntaiwanairforcechireadthedocscbgxn--1ctwolominamataobaomoriguchi" + + "haraffleentry-snowplowiczeladzfredrikstadtvscrysecuritytacticser" + + "vehalflifeinsurancefreeddnsfreebox-oservehttpbin-butterfreedeskt" + + "oppdalfreemasonryfreemyiphosteurovisionfreesitefreetlservehumour" + + "freiburgfreseniuscultureggio-calabriafribourgfriuli-v-giuliafriu" + + "li-ve-giuliafriuli-vegiuliafriuli-venezia-giuliafriuli-veneziagi" + + "uliafriuli-vgiuliafriuliv-giuliafriulive-giuliafriulivegiuliafri" + + "ulivenezia-giuliafriuliveneziagiuliafriulivgiuliafrlfroganservei" + + "rchonanbulsan-suedtirolukowestus2frognfrolandynvpnpluscountryest" + + "ateofdelawarecreationfrom-akrehamnfrom-alfrom-arfrom-azimuthatog" + + "ayabukihokumakogenglandyroyrvikingruenoharafrom-capetownnews-sta" + + "gingfrom-coffeedbackplaneappaviancargodaddyn-vpndnserveminecraft" + + "ranslatefrom-ctransportefrom-dchoseikarugamvikariyaltakasagotsuk" + + "isofukushimangyshlakasamatsudopaasnesoddenmarkhangelskjakdneprop" + + "etrovskiervaapsteiermarkarlsoyfrom-deatnuniversityfrom-flanderse" + + "rvemp3from-gaulardalfrom-hichisodegaurafrom-iafrom-idfrom-ilfrom" + + "-in-brbar0from-kservep2pfizerfrom-kyowariasahikawafrom-langevagr" + + "igentomologyeonggiehtavuoatnabudapest-a-la-masion-rancherkasydne" + + "yfrom-malselvendrellfrom-mdfrom-medizinhistorischeservepicserveq" + + "uakefrom-midsundfrom-mnfrom-modalenfrom-mservesarcasmatartanddes" + + "ignfrom-mtnfrom-nchoshibuyachtsanjotelulubindaluroyfrom-ndfrom-n" + + "efrom-nhktransurlfrom-njservicesevastopolefrom-nminamiizukaminok" + + "awanishiaizubangefrom-nvallee-d-aosteigenfrom-nynysagamiharafrom" + + "-ohdattorelayfrom-oketogonohejis-a-chefastly-terrariuminamiechiz" + + "enfrom-orfrom-padoval-daostavalleyfrom-pratogurafrom-ris-a-conse" + + "rvativegasevenassisicilyfrom-schoenbrunnfrom-sdscloudfrom-tnfrom" + + "-txn--1lqs03nfrom-utsiracusaikirovogradoyfrom-vald-aostarostwodz" + + "islawhalingfrom-vtrapaniizafrom-wafrom-wiardwebspacefrom-wvallee" + + "aosteinkjerusalembroideryfrom-wyfrosinonefrostaplesewhoswholding" + + "small-webredirectmeeresistancefroyahooguyfruskydivingfstcgroupgf" + + "oggiafujiiderafujikawaguchikonefujiminokamoenairguardiannakadoma" + + "rineat-urlfujinomiyadattowebcampinashikiminohostfoldnavyfujiokay" + + "amalvikaszubyfujisatoshonairlinebraskaunicommbankatowicefujisawa" + + "fujishiroishidakabiratoridebianfujitsurugashimamurogawafujiyoshi" + + "davvenjargap-northeast-3fukayabeatsharis-a-cpadualstackatsushika" + + "beebyteapplinzis-a-cubicle-slavellinodeobjectsharpharmacienshawa" + + "iijimarburgfukuchiyamadavvesiidappnodebalancertificationfukudomi" + + "gawafukuis-a-democratravelchannelfukumitsubishigakiryuohkurafuku" + + "okazakisarazure-mobileirvikatsuyamarriottravelersinsurancefukuro" + + "ishikarikaturindalfukusakishiwadazaifudaigokaseljordfukuyamagata" + + "jimifunefunabashiriuchinadafunagatajiris-a-designerfunahashikami" + + "amakusatsumasendaisenergyfundaciofunkfeuerfuoiskujukuriyamandalf" + + "uosskoczowienfurnitureggio-emilia-romagnakasatsunairportland-4-s" + + "alernogatabusebastopologyeongnamegawafaicloudinedre-eikerfurubir" + + 
"afurudonostiaafurukawairtelebitbridgestoneen-rootaruis-a-doctorf" + + "usoftwarezzoologyfussaintlouis-a-anarchistoireggiocalabriafutaba" + + "yamaguchinomihachimanagementrdfutboldlygoingnowhere-for-morenaka" + + "tombetsumitakagiizefuttsurugimperiafuturecmshellaspeziafuturehos" + + "tingfuturemailingfvghangglidinghangoutsystemscloudsitehannanmoku" + + "izumodenakayamansionshimojis-a-greenhannorthwesternmutualhanyuze" + + "nhapmircloudletshimokawahappounjargaharstadharvestcelebrationhas" + + "amanxn--1lqs71dhasaminami-alpshimokitayamattelekommunikationhash" + + "banghasudahasura-appharmacyshimonitayanagitapphdfcbankazohasvika" + + "zteleportlligatrendhostinghatoyamazakitahiroshimaoris-a-gurunusu" + + "alpersonhatsukaichikaiseiyoichippubetsubetsugarunzenhattfjelldal" + + "hayashimamotobungotakadagestangeorgeorgiahazuminobusellfylkesbib" + + "lackbaudcdn-edgestackhero-networkinggroupliguriahelsinkitakamiiz" + + "umisanofidelitysvardontexistmein-iservebeero-stagehembygdsforbun" + + "dhemneshimonosekikawahemsedalhepforgeblockshimosuwalkis-a-hard-w" + + "orkershimotsukeheroyhgtvalleedaostehidorahigashiagatsumagoianiah" + + "igashichichibunkyonanaoshimakanegasakilatironrenderhigashihirosh" + + "imanehigashiizumozakitakatakamoriokakudamatsuehigashikagawahigas" + + "hikagurasoedahigashikawakitaaikitakyushuaiahigashikurumeetrentin" + + "-sud-tirolhigashimatsushimapartmentshimotsumayfirstockholmestran" + + "dhigashimatsuyamakitaakitadaitoigawahigashimurayamamotorcycleshi" + + "nichinanhigashinarusells-for-lesshinjournalismailillesandefjordh" + + "igashinehigashiomitamamurausukitamihamadahigashiosakasayamanakak" + + "ogawahigashishirakawamatakanabeautysfjordhigashisumiyoshikawamin" + + "amiaikitamotosumy-gatewayhigashitsunortonhigashiurawa-mazowszexn" + + "etlifyis-a-hunterhigashiyamatokoriyamanashifteditorxn--1qqw23ahi" + + "gashiyodogawahigashiyoshinogaris-a-knightpointtohoboleslawiecono" + + "miastalowa-wolawawsmpplanetariuminamimakis-a-landscaperugiahirai" + + "zumisatohnoshoooshikamaishimodatehirakatashinagawahiranairtraffi" + + "cplexus-1hirarahiratsukaerusrcfastlylbananarepublic66hirayaizuwa" + + "kamatsubushikusakadogawahistorichouseshinjukumamotoyamasfjordenh" + + "itachiomiyagildeskaliszhitachiotagophiladelphiaareadmyblogsytehi" + + "traeumtgeradell-ogliastraderhjartdalhjelmelandholeckochikushinon" + + "senasakuchinotsuchiurakawaholidayhomegoodshinkamigototalhomeiphi" + + "latelyhomelinkyard-cloudjiffyresdalhomelinuxn--2m4a15ehomeoffice" + + "homesecuritymacaparecidahomesecuritypchoyodobashichikashukujitaw" + + "araholtalenissayokkaichiropractichirurgiens-dentistes-en-franceh" + + "omesenseeringhomesklepphilipsynology-diskstationhomeunixn--2scrj" + + "9christiansburgripehondahongotembaixadahonjyoitakanezawahorninda" + + "lhorsells-for-ustkanmakitaurahortendofinternet-dnshinshinotsurge" + + "onshalloffamelbournehospitalhoteleshinshirohotelwithflightshinto" + + "kushimahotmailhoyangerhoylandetroitskazunoticiashintomikasaharah" + + "umanitieshinyoshitomiokamishihoronobeauxartsandcraftshiojirishir" + + "ifujiedahurdalhurumajis-a-lawyerhyllestadhyogoris-a-liberalhyuga" + + "warahyundaiwafuneis-uberleetrentin-suedtirolis-very-badajozis-a-" + + "nursells-itrentin-sudtirolis-very-evillageis-very-goodyearis-ver" + + "y-niceis-very-sweetpepperis-with-thebandownloadisleofmanaustdalj" + + "env-arubajddarchitecturealtorlandjeonnamerikawauejetztrentino-a-" + + "adigejevnakershusdecorativeartshitaramajewelryjewishartgalleryjf" + + "kharkivanylvenneslaskerrylogisticshizukuishimofusakakinokiajgora" + + 
"jlljls-sto1jls-sto2jls-sto3jmphoenixn--30rr7yjnjaworznoshiroomgj" + + "oyentrentino-aadigejoyokaichibalashovhadselburgjpnjprshizuokamit" + + "suejurkoshimizumakiyosatokamachintaifun-dnsaliashoujis-a-persona" + + "ltrainerkoshunantankhmelnitskiyamarshallstatebankharkovaokosugek" + + "otohiradomainstitutekotourakouhokutamakiyosemitekounosupabasells" + + "yourhomeftphotographysiokouyamarylandkouzushimarylhurstjordalsha" + + "lsenkozagawakozakiyosunndalkozowiiheyakagekpnkppspbar2krasnikaho" + + "kutokashikizunokunimilitarykrasnodarkredstonekrelliankristiansan" + + "dcatshowakristiansundkrodsheradkrokstadelvalle-aostatic-accessho" + + "wtimeldalkryminamioguni5kumanotteroykumatorinovecoregontrailroad" + + "kumejimashikekumenantokonamegatakashimashikis-a-photographerokus" + + "sldkunisakis-a-playershiftcryptonomichigangwonkunitachiarailwayk" + + "unitomigusukukis-a-republicancerresearchaeologicaliforniakunnepp" + + "uboliviajessheimpertrixcdn77-secureggioemiliaromagnaklodzkodaira" + + "kunstsammlungkunstunddesignkuokgrouphxn--3bst00minamisanrikubets" + + "upplykurehabmerkurgankurobeepilepsykkylvenicekurogimimatakasugai" + + "s-a-rockstarachowicekuroisogndalkuromatsunais-a-socialistdlibest" + + "adkurotakikawasakis-a-soxfankushirogawakustanais-a-studentalkusu" + + "pplieshwildlifestylekutchanelkutnow-dnsienarutomobelementoraykuz" + + "umakis-a-teacherkassyno-dshirakofuefukihabororoshiranukamisunaga" + + "wakvafjordkvalsundkvamlidlugolekafjordvagsoygardendoftheinternet" + + "flixilovecollegefantasyleaguernseykvanangenkvinesdalkvinnheradkv" + + "iteseidatingkvitsoykwpspdnsigdalkzmisasaguris-an-accountantshira" + + "ois-a-linux-usershioyandexcloudmisawamisconfusedmishimassa-carra" + + "ra-massacarraramassabusinessebykleclerchromediatechnologymissile" + + "zajskhmelnytskyivaporcloudmisugitokuyamassivegridmitakeharamitou" + + "rismilemitoyoakemiuramiyazurecontainerdpolicemiyotamanomjondalen" + + "mlbfanmontrealestatefarmequipmentrentino-s-tirolmonza-brianzappo" + + "siiitesilkhplaystation-cloudyclustermonza-e-della-brianzaptokyot" + + "angouvichungnamdalseidfjordurbanamexhibitionissedalutskarmoymonz" + + "abrianzaramonzaebrianzamonzaedellabrianzamoonscaleforcemordoviam" + + "oriyamasudamoriyoshiminamiashigaramormonstermoroyamatsumaebashik" + + "shacknetrentino-stirolmortgagemoscowilliamhillmoseushistorymosjo" + + "enmoskenesimple-urlmossirdalmosviklabudhabikinokawabarthaebaruer" + + "icssongdalenviknakatsugawamoteginowaniigatakahamangooglecodespot" + + "rentino-sud-tirolmoviemovimientolgamozilla-iotrentino-sudtirolmt" + + "ranbymuginozawaonsensiositemuikaminoyamaxunispacemukoebenhavnmul" + + "houseminemunakatanemuncienciamuosattemupiemontemurmanskmpspawnex" + + "tdirectrentino-alto-adigemurotorcraftrentino-sued-tirolmusashino" + + "haramuseetrentino-suedtirolmuseumverenigingmusicarbonia-iglesias" + + "-carboniaiglesiascarboniamutsuzawamy-vigorlicemy-wanggoupilemyac" + + "tivedirectorymyasustor-elvdalmycdmycloudnslupsknx-serversicherun" + + "gmydattolocalhistorymyddnsgeekgalaxymydissentrentinoa-adigemydob" + + "isshikis-an-actormydroboehringerikemydslzmyeffectrentinoaadigemy" + + "fastblogermyfirewallonieruchomoscienceandindustrynmyforuminamita" + + "nemyfritzmyftpaccessmolaquilansmushcdn77-sslingmyhome-servermyji" + + "nomykolaivarggatrentinoalto-adigemymailermymediapchurchaseljeeps" + + "ondriodejaneirodoymyokohamamatsudamypepilotsnoasakataketomisatos" + + "himatsuzakis-an-actresshiraokamitondabayashiogamagoriziamypetsok" + + "ndalmyphotoshibalatinoopencraftrainingmypicturesolarssonmypsxn--" + + 
"3ds443gmysecuritycamerakermyshopblocksolognemyshopifymyspreadsho" + + "ppingmythic-beastsolundbeckomaganemytis-a-bookkeeperspectakarazu" + + "kaluganskomakiyokawaramytuleap-partnersomamyvncircustomer-ocimdb" + + "amblebesbyeniwaizumiotsukumiyamazonawsglobalacceleratorahimeshim" + + "abaridagawakuyachimataijibmdevelopmentashkentatamotorsitestingla" + + "dedyn-berlincolnavigationavoizumizakiitatebayashiibahccavuotnaga" + + "rag-cloud-charitydalipaywhirlimitedgcanonoichinomiyakebinagisoch" + + "ildrensgardenavuotnapleskns3-eu-west-2mywirepaircraftingvollolip" + + "opimientakayamatsuuraplatter-appinbarcelonagawalbrzycharternopil" + + "awalesundiscountysnes3-eu-west-3utilities-1platterpinkomatsushim" + + "arugame-hostyhostingplazaplcube-serverplumbingoplurinacionalpodh" + + "alepodlasiellaktyubinskiptveterinairealmpmnpodzonepohlpoivronpok" + + "erpokrovskommunalforbundpoliticarrdpolitiendapolkowicepoltavalle" + + "-d-aostaticsopotrentinos-tirolpomorzeszowinbarclaycards3-externa" + + "l-1ponpesaro-urbino-pesarourbinopesaromasvuotnaritakoelnponypord" + + "enonepornporsangerporsangugeporsgrunnanyokoshibahikariwanumataka" + + "zakis-an-artistgstagepoznanpraxis-a-bruinsfanprdpreservationpres" + + "idioprgmrprimetelemarkommuneprincipeprivatizehealthinsuranceprof" + + "esionalprogressivestnesor-odalpromombetsupportrentinostirolprope" + + "rtyprotectionprotonetrentinosud-tirolprudentialpruszkowindmillpr" + + "vcyberlevagangaviikanonjis-an-engineeringprzeworskogpugliapulawy" + + "pupioneerpvhagebostadpvtrentinosudtirolpwcistrondheimmobilieniss" + + "hingucciprianidurhamburgriwataraidynathomebuiltwithdarkarpaczest" + + "-le-patroniyodogawapythonanywherepbodynamic-dnsor-varangerpzqldq" + + "otoyohashimotoolsorfoldqponiatowadaqslgbtrentinosued-tirolqualif" + + "ioappippueblockbusterniiminamiawajikis-an-anarchistoricalsociety" + + "quickconnectrentinosuedtirolquicksytesorocabalestrandabergamoare" + + "keymachineustargardquipelementsorreisahayakawakamiichikawamisato" + + "ttoris-an-entertainerswedenswidnicartoonartdecologiaswidnikkokam" + + "iminersouthcarolinarvikomonotogawaswiebodzin-dslattuminanoswinou" + + "jscienceandhistoryswissmarterthanyoutwentesynology-dsouthwest1-u" + + "slivinghistorytularvikongsbergtunesowatunkongsvingerturystykaney" + + "amazoetuscanytushuissier-justicetuvalleaostaverntuxfamilytwmailv" + + "ibo-valentiavibovalentiavideovillaspectruminamiyamashirokawanabe" + + "laudibleasingvinnicasacamdvrcampinagrandebuilderschmidtre-gaulda" + + "lvinnytsiavipsinaappittsburghofficialvirginiavirtual-userveexcha" + + "ngevirtualcloudvirtualservervirtualuserveftpiwatevirtuelvisakuho" + + "kksundviterboknowsitallvivolkenkundenvixn--3hcrj9civilaviationth" + + "ewifiatlassian-dev-myqnapcloudcontrolledogawarabikomaezakirunoip" + + "irangalsaceomutashinainternationalfirearmsannanvlaanderennesoyvl" + + "adikavkazimierz-dolnyvladimirvlogintoyonezawavmincomcastresindev" + + "icenzaporizhzhiavologdanskoninjambylvolvolkswagentspeedpartnervo" + + "lyngdalvoorlopervossevangenvotevotingvotoyonovps-hostrowiecivili" + + "sationwithgoogleapiszwithyoutuberspacekitagatamayufuettertdasnet" + + "zwiwatsukiyonosegawawixsitewloclawekonsulatrobeeldengeluidvarese" + + "rvdwmcloudwmflabspydebergwoodsideltairavpagexlworse-thandawowind" + + "owskrakowinnersphinxn--3e0b707ewpdevcloudwpenginepoweredwphosted" + + "mailwpmucdnpixolinodeusercontentrentinoaltoadigewpmudeveloperaun" + + "iterois-foundationwritesthisblogwroclawiospjelkavikomorotsukagaw" + + "awtcirclerkstagets-itrentoyonakagyokutoyakolobrzegersundwtfastvp" + + 
"s-serverisignwuozuwzmiuwajimaxn--45q11civilwarmiasadoesntexistei" + + "ngeekaruizawaxn--4gbriminingxn--4it168dxn--4it797kooris-a-painte" + + "ractivestfoldxn--4pvxs4allxn--54b7fta0cclanbibaidarmeniaxn--55qw" + + "42gxn--55qx5dxn--5js045dxn--5rtp49cldmailuxuryxn--5rtq34kopervik" + + "hersonxn--5su34j936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2r" + + "xn--6qq986b3xlxn--7t0a264cleverappstmnxn--80aaa0cvacationsrhtren" + + "tinsud-tirolxn--80adxhksrlxn--80ao21axn--80aqecdr1axn--80asehdba" + + "refootballooninglassassinationalheritagebinordre-landiscourses3-" + + "sa-east-1xn--80aswgxn--80augustowitdkonskowolayangrouphonefossho" + + "pwarendalenugxn--8ltr62koryokamikawanehonbetsurutaharaxn--8pvr4u" + + "xn--8y0a063axn--90a1affinitylotterybnikeisenbahnxn--90a3academia" + + "micable-modemoneyxn--90aeroportalaheadjudaicadaquesrvaroyxn--90a" + + "ishobarakawagoexn--90amcdirxn--90azhytomyravendbargainstances3-u" + + "s-east-2xn--9dbhblg6dietrevisojamisonxn--9dbq2axn--9et52uxn--9kr" + + "t00axn--andy-iraxn--aroport-byaotsurnadalxn--asky-iraxn--aurskog" + + "-hland-jnbarreauctioncilla-speziauthgear-stagingjesdalimanowarud" + + "aurskog-holandinggfarmerseineatonsbergitpagefrontappalmspringsak" + + "erevistarnbergivestbytemark12xn--avery-yuasakuragawaxn--b-5gaxn-" + + "-b4w605ferdxn--balsan-sdtirol-nsbstorebaselectrentinsudtirolxn--" + + "bck1b9a5dre4clicketcloudcontrolapparmatsushigexn--bdddj-mrabdxn-" + + "-bearalvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-" + + "k7axn--bidr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyasakaiminat" + + "oyookanazawaxn--bjddar-ptargetmyipizzaxn--blt-elabourxn--bmlo-gr" + + "aingerxn--bod-2natalxn--bozen-sdtirol-2obanazawaxn--brnny-wuacad" + + "emy-firewall-gatewayxn--brnnysund-m8accident-investigation-aptib" + + "leadpagesquare7xn--brum-voagatritonxn--btsfjord-9zaxn--bulsan-sd" + + "tirol-nsbarrel-of-knowledgeappleborkaragandauthgearappspacehoste" + + "d-by-previderhclouddnslivegarsheiheijibigawaustevoll-o-g-i-n4t3l" + + "3p0rtarnobrzegyptianatuurwetenschappenginebetsuikirkenes3-ap-sou" + + "th-1xn--c1avgxn--c2br7gxn--c3s14miniserverxn--cck2b3barrell-of-k" + + "nowledgecomputerhistoryofscience-fictionfabricafjs3-us-gov-west-" + + "1xn--cckwcxetdxn--cesena-forl-mcbremangerxn--cesenaforl-i8axn--c" + + "g4bkis-gonexn--ciqpnxn--clchc0ea0b2g2a9gcdxn--comunicaes-v6a2oxn" + + "--correios-e-telecomunicaes-ghc29axn--czr694barsycenterprisesaki" + + "joburgleezebizenakanotoddenayorovnobirauthordalanddnss3-ap-south" + + "east-2xn--czrs0troandinosaureplantationxn--czru2dxn--czrw28barsy" + + "onlinewhampshirebungoonord-frontierxn--d1acj3basicserversaillesj" + + "abbottatarantours3-us-west-1xn--d1alfaromeoxn--d1atrogstadxn--d5" + + "qv7z876clickrisinglesannohelplfinancialuzernxn--davvenjrga-y4axn" + + "--djrs72d6uyxn--djty4kosaigawaxn--dnna-grajewolterskluwerxn--drb" + + "ak-wuaxn--dyry-iraxn--e1a4clinichitachinakagawassamukawatarikuze" + + "ntakatainaioiraseating-organicbcn-north-1xn--eckvdtc9dxn--efvn9s" + + "torfjordxn--efvy88haibarakitahatakamatsukawaxn--ehqz56nxn--elqq1" + + "6hair-surveillancexn--eveni-0qa01gaxn--f6qx53axn--fct429kosakaer" + + "odromegallupaasdaburxn--fhbeiarnxn--finny-yuaxn--fiq228c5hstorjc" + + "loud-ver-jpchristmasakinderoyxn--fiq64basilicataniautomotiveland" + + "ds3-ca-central-1xn--fiqs8stpetersburgxn--fiqz9streamscompute-1xn" + + "--fjord-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--forl-" + + "cesena-fcbsstudioxn--forlcesena-c8axn--fpcrj9c3dxn--frde-grandra" + + "pidstudynamisches-dnsortlandxn--frna-woaraisaijosoyrovigotpanthe" + + 
"onsitexn--frya-hraxn--fzc2c9e2cliniquedapliernewyorkshirecifedex" + + "eterxn--fzys8d69uvgmailxn--g2xx48clintonoshoesanokarumaifarmstea" + + "dyndns-at-homedepotenzamamidorittogakushimotoganexn--gckr3f0faus" + + "kedsmokorsetagayaseralingenoamishirasatogitsumidatlantichofunato" + + "riginstantcloudfrontdoorxn--gecrj9clothingdustdatadetectjmaxxxer" + + "oxfinityxn--ggaviika-8ya47hakatanorth-kazakhstanxn--gildeskl-g0a" + + "xn--givuotna-8yasugitlaborxn--gjvik-wuaxn--gk3at1exn--gls-elacai" + + "xaxn--gmq050is-into-animegurownproviderxn--gmqw5axn--gnstigbeste" + + "llen-zvbrplsbxn--3pxu8konyvelohmusashimurayamarumorimachidaxn--g" + + "nstigliefern-wobihirosakikamijimatsunowtvestre-totennishiawakura" + + "xn--h-2failxn--h1aeghakodatexn--h1ahnxn--h1alizxn--h2breg3evenes" + + "tuff-4-salexn--h2brj9c8cn-northwest-1xn--h3cuzk1diherokuappkomfo" + + "rbar1xn--hbmer-xqaxn--hcesuolo-7ya35basketballfinanzjampalacehim" + + "ejiiyamanouchikuhokuryugasakitanakagusukumodernfshostrodawarauto" + + "scanadaeguambulancentralus-2xn--hery-iraxn--hgebostad-g3axn--hkk" + + "inen-5waxn--hmmrfeasta-s4accident-prevention-k3stufftoread-books" + + "nesoruminamiuonumasoyxn--hnefoss-q1axn--hobl-iraxn--holtlen-hxax" + + "n--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1" + + "a6a2exn--imr513nxn--indery-fyasuokannamiharuxn--io0a7is-into-car" + + "shiratakahagithubpreviewsaitamatsukuris-a-llamarcheapigeelvinckd" + + "diamondshirahamatonbetsurgeryxn--j1adplantsomnarviikamiokameokam" + + "akurazakitashiobaraxn--j1aefbsbxn--1ck2e1banzaicloudappspotagerx" + + "n--j1ael8batochiokinoshimaintenancempresashibetsukuin-vpncasadel" + + "amonedancemrxn--j1amhakonexn--j6w193gxn--jlq480n2rgxn--jlq61u9w7" + + "batsfjordiscoveryokoteu-1xn--jlster-byatominamidaitomanchesterxn" + + "--jrpeland-54axn--jvr189minisitexn--k7yn95exn--karmy-yuaxn--kbrq" + + "7oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7d" + + "xn--kltx9axn--klty5xn--41axn--koluokta-7ya57hakubahcavuotnagaivu" + + "otnagaokakyotambabydgoszczecinemagnethnologyxn--kprw13dxn--kpry5" + + "7dxn--kput3is-into-cartoonshishikuis-a-musicianxn--krager-gyatsu" + + "kanoyakumoldellogliastradingxn--kranghke-b0axn--krdsherad-m8axn-" + + "-krehamn-dxaxn--krjohka-hwab49jdevcloudfunctionshisohugheshisuif" + + "uelveruminamiminowaxn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyatsu" + + "shiroxn--kvnangen-k0axn--l-1fairwindstuttgartrentinsued-tirolxn-" + + "-l1accentureklamborghinikolaeventsurreyxn--laheadju-7yawaraxn--l" + + "angevg-jxaxn--lcvr32dxn--ldingen-q1axn--leagaviika-52bauhauspost" + + "man-echocolatelevisionflashdrivefsncfdishakotanhlfanhsbcasertail" + + "scalecznagasukeu-2xn--lesund-huaxn--lgbbat1ad8jdfaststacksaxoxn-" + + "-lgrd-poacctromsakegawaxn--lhppi-xqaxn--linds-pramericanartromso" + + "kamogawaxn--lns-qlavagiskexn--loabt-0qaxn--lrdal-sraxn--lrenskog" + + "-54axn--lt-liacngroks-thisayamanobeokakegawaxn--lten-granexn--lu" + + "ry-iraxn--m3ch0j3axn--mely-iraxn--merker-kuaxn--mgb2ddesusakis-b" + + "ytomaritimekeepingxn--mgb9awbfbx-oslodingenxn--mgba3a3ejtrusteex" + + "n--mgba3a4f16axn--mgba3a4fra1-deportevaksdalxn--mgba7c0bbn0axn--" + + "mgbaakc7dvfbxostrowwlkpmguidefinimamateramochizukindlegallocus-4" + + "xn--mgbaam7a8hakuis-a-financialadvisor-aurdalxn--mgbab2bdxn--mgb" + + "ah1a3hjkrdxn--mgbai9a5eva00bellunord-odalvdalaskanittedallasalle" + + "angaviikadenagahamaroyerxn--mgbai9azgqp6jejuniperxn--mgbayh7gpal" + + "ermomahachijolsterxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn-" + + "-mgbcpq6gpa1axn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgbgu82axn-" + + 
"-mgbi4ecexposedxn--mgbpl2fhskypexn--mgbqly7c0a67fbcnpyatigorskol" + + "efrakkestadyndns-at-workisboringrondarxn--mgbqly7cvafr-1xn--mgbt" + + "3dhdxn--mgbtf8flapymntrvestre-slidretrosnubarclays3-fips-us-gov-" + + "west-1xn--mgbtx2beneventodayokozeu-3xn--mgbx4cd0abbvieeexn--mix0" + + "82fedorainfraclouderaxn--mix891fedorapeoplegnicapebretonamicroli" + + "ghtinguitarschokokekschokoladenxn--mjndalen-64axn--mk0axin-the-b" + + "andais-into-gamessinazawaxn--mk1bu44cnsantabarbaraxn--mkru45is-l" + + "eetrentin-sued-tirolxn--mlatvuopmi-s4axn--mli-tlavangenxn--mlsel" + + "v-iuaxn--moreke-juaxn--mori-qsakurais-lostre-toteneis-a-nascarfa" + + "nxn--mosjen-eyawatahamaxn--mot-tlazioxn--mre-og-romsdal-qqbusera" + + "nishiaritakurashikis-not-certifiedxn--msy-ula0hakusanagochijiwad" + + "egreexn--mtta-vrjjat-k7aflakstadaokagakicks-assnasaarlandxn--muo" + + "st-0qaxn--mxtq1minnesotaketakatoris-a-techietis-a-libertarianxn-" + + "-ngbc5azdxn--ngbe9e0axn--ngbrxn--42c2d9axn--nit225koseis-a-patsf" + + "anxn--nmesjevuemie-tcbalsan-sudtirollagdenesnaaseinet-freaksuson" + + "oxn--nnx388axn--nodessakyotanabellevuelosangelesuzakanagawaxn--n" + + "qv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-byaes" + + "eoullensvanguardxn--nvuotna-hwaxn--nyqy26axn--o1achernihivgubsuz" + + "ukananiikappudoxn--o3cw4haldenxn--o3cyx2axn--od0algxn--od0aq3ben" + + "tleyolasiteu-4lima-cityeatselinogradimo-i-rana4u2-localhostrolek" + + "aniepce12hpalmaserati234xn--ogbpf8flatangerxn--oppegrd-ixaxn--os" + + "tery-fyaxn--osyro-wuaxn--otu796dxn--p1acfedoraprojectoyotsukaido" + + "xn--p1ais-savedxn--pgbs0dhlx3xn--porsgu-sta26feiraquarelleaseekl" + + "ogescholarshipschoolsztynsettsurfashionxn--pssu33lxn--pssy2uxn--" + + "q7ce6axn--q9jyb4cntjomelhusgardenxn--qcka1pmckinseyxn--qqqt11min" + + "tereitrentino-altoadigexn--qxa6axn--qxamsterdamnserverbaniaxn--r" + + "ady-iraxn--rdal-poaxn--rde-ulaxn--rdy-0nabaris-slickfh-muensterx" + + "n--rennesy-v1axn--rhkkervju-01afermockasserverrankoshigayamein-v" + + "igorgexn--rholt-mragowoltlab-democraciaxn--rhqv96gxn--rht27zxn--" + + "rht3dxn--rht61exn--risa-5naturalhistorymuseumcenterxn--risr-irax" + + "n--rland-uuaxn--rlingen-mxaxn--rmskog-byaxn--rny31halsaitohmanno" + + "rthflankaufentigerxn--rovu88beppublishproxyombolzano-altoadigeol" + + "ogyomitanobninskarasjohkamikitayamatsurincheonikonanporobserverx" + + "n--rros-granvindafjordxn--rskog-uuaxn--rst-0naturalsciencesnatur" + + "ellesuzukis-certifiedxn--rsta-framercanvasvalbardunloppacificita" + + "deliveryggeexn--rvc1e0am3exn--ryken-vuaxn--ryrvik-byaxn--s-1fait" + + "hammarfeastafricapitalonewspaperxn--s9brj9collectionxn--sandness" + + "jen-ogbeskidyn-ip24xn--sandy-yuaxn--sdtirol-n2axn--seral-lraxn--" + + "ses554gxn--sgne-graphoxn--45br5cylxn--skierv-utazasvcitichiryuky" + + "uragifuchungbukharahkkeravjuegoshikimobetsuldaluccaravantaarparl" + + "iamentjeldsundrudupontariobranconavstackareliancexn--skjervy-v1a" + + "xn--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5naturbruksgymn" + + "xn--slt-elabcieszynh-serveblogspotaribeiraogakibichuoxn--smla-hr" + + "axn--smna-gratangentlentapisa-geekosherbrookegawaxn--snase-nraxn" + + "--sndre-land-0cbestbuyshouses3-us-west-2xn--snes-poaxn--snsa-roa" + + "xn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-g" + + "gbetainaboxfusejnyanagawalmartateshinanomachimkentateyamaveroyke" + + "nebakkeshibechambagriculturealtychyattorneyagawakepnombrendlynge" + + "nflfanpachigasakids3-eu-central-1xn--srfold-byaxn--srreisa-q1axn" + + "--srum-gratis-a-bulls-fanxn--stfold-9xaxn--stjrdal-s1axn--stjrda" + + 
"lshalsen-sqbhzcasinordeste-idcateringebuildinglitcheltenham-radi" + + "o-opensocialimolisembokuleuvenetokigawavocatanzaroweddingjovikan" + + "zakitchenaval-d-aosta-valleyboltarumizusawaustinnaumburgivingjem" + + "nes3-ap-southeast-1xn--stre-toten-zcbieidskoguchikuzenvironmenta" + + "lconservationionjukudoyamaizurugbyglandroverhallaakesvuemielecce" + + "vje-og-hornnes3-website-ap-northeast-1xn--t60b56axn--tckwebthing" + + "sveioxn--tiq49xqyjelasticbeanstalkhakassiaxn--tjme-hraxn--tn0agr" + + "ocerydxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trentin-sd-tiro" + + "l-rzbielawaltervistaikikonaikawachinaganoharamcoachampionshiphop" + + "tobamadridnbloggerxn--trentin-sdtirol-7vbiellahppiacenzachpomors" + + "kieninohekinannestadiskussionsbereichattanooganordkappgafaninomi" + + "yakonojorpelandisrechtranakamagayahikobeardubaiduckdnsnillfjordi" + + "tchyouripanamatsusakahoginankokubunjindianapolis-a-bloggerxn--tr" + + "entino-sd-tirol-c3bieszczadygeyachiyodaejeonbukcoalwaysdatabaseb" + + "allangenkainanaejrietisalatinabeno-ipifony-1xn--trentino-sdtirol" + + "-szbievat-band-campaniavoues3-eu-west-1xn--trentinosd-tirol-rzbi" + + "fukagawashingtondclk3xn--trentinosdtirol-7vbigv-infolldalivornow" + + "ruzhgorodeoceanographics3-website-ap-southeast-1xn--trentinsd-ti" + + "rol-6vbihorologyonagoyaxarnetbankaracoldwarszawaustraliamusement" + + "dllpages3-ap-northeast-2ix4432-balsan-suedtirolkuszczytnord-aurd" + + "alp16-b-datacentermezproxyzgorabruzzoologicalabamagasakishimabar" + + "aogashimadachicagoboats3-ap-northeast-1kappchizip611xn--trentins" + + "dtirol-nsbikedaemonmoutheworkpccwedeployonagunicloudivtasvuodnak" + + "amurataishinomakinkobierzycextraspace-to-rentalstomakomaibarazur" + + "ewebsiteshikagamiishibukawakkanaibetsubamericanfamilydsmynasushi" + + "obarackmazeplayokosukanraustrheimatunduhrennebugattiffanyaarbort" + + "eaches-yogasawaracingjerdrumcprequalifymeinforumzgorzeleccogjers" + + "tadotsuruokakamigaharaukraanghkembuchikumagayagawakayamagentosit" + + "ecnologiajudygarlanddnskingdyniamunemurorangecloudplatform0emmaf" + + "ann-arboretumbriamallamaceiobbcg120001wwwbq-abogadobeaemcloud-fr" + + "1337xn--trgstad-r1axn--trna-woaxn--troms-zuaxn--tysvr-vraxn--uc0" + + "atvestvagoyxn--uc0ay4axn--uist22hamurakamigoris-a-geekautokeinot" + + "iceablewismillerxn--uisz3gxn--unjrga-rtargithubusercontentryclou" + + "dflareportrentinsuedtirolxn--unup4yxn--uuwu58axn--vads-jraxn--va" + + "lle-aoste-ebbtrysiljanxn--valle-d-aoste-ehbodoes-itcouldbeworldx" + + "n--valleaoste-e7axn--valledaoste-ebbvadsoccertmgrazerbaijan-maye" + + "ngerdalcesvelvikomvuxn--32vp30hagakhanamigawaxn--vard-jraxn--veg" + + "rshei-c0axn--vermgensberater-ctbitsvizzeraxn--vermgensberatung-p" + + "wblogoiplatformshangrilanxessooxn--vestvgy-ixa6oxn--vg-yiabkhazi" + + "axn--vgan-qoaxn--vgsy-qoa0jelenia-goraxn--vgu402colognexus-3xn--" + + "vhquvevelstadxn--vler-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-" + + "yla5gxn--vuq861bilbaokinawashirosatobishimagazineues3-website-ap" + + "-southeast-2xn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1c" + + "olonialwilliamsburgrongausdalvivanovoldaxn--wgbl6axn--xhq521bill" + + "ustrationredumbrellair-traffic-controlleyoriikarasjokarasuyamarn" + + "ardalombardiadembetsukubankaratexn--xkc2al3hye2axn--xkc2dl3a5ee0" + + "handsonyxn--y9a3aquariumisakis-a-therapistoiaxn--yer-znaturhisto" + + "rischesvn-reposoundcastronomy-routerxn--yfro4i67oxn--ygarden-p1a" + + "xn--ygbi2ammxn--45brj9civilizationxn--ystre-slidre-ujbioceanogra" + + "phiquexn--zbx025dxn--zf0ao64axn--zf0avxlxn--zfr164bipanasonicath" + + 
"olicaxiaskimitsubatamibudejjuedischesapeakebayernirasakindianmar" + + "ketingliwicexnbayxz" // nodes is the list of nodes. Each node is represented as a uint32, which // encodes the node's children, wildcard bit and node type (as an index into @@ -540,1825 +540,1827 @@ const text = "9guacuiababia-goracleaningroks-theatree164-balsfjordd-dnshome-we" // An I denotes an ICANN domain. // // The layout within the uint32, from MSB to LSB, is: +// // [ 0 bits] unused // [10 bits] children index // [ 1 bits] ICANN bit // [15 bits] text index // [ 6 bits] text length var nodes = [...]uint32{ - 0x3346c3, - 0x358784, - 0x2ed746, - 0x21cb43, - 0x21cb46, - 0x399c86, - 0x3c3043, - 0x221ec4, - 0x205687, - 0x2ed388, + 0x330b03, + 0x3b6e44, + 0x2e8c86, + 0x350003, + 0x350006, + 0x392c06, + 0x3b9283, + 0x21a084, + 0x3deb07, + 0x2e88c8, 0x1a000c2, - 0x1f44d87, - 0x385a09, - 0x2e460a, - 0x2e460b, - 0x238883, - 0x241b45, - 0x2204502, - 0x204504, - 0x2cdbc3, - 0x252f05, - 0x2602142, - 0x36c683, - 0x2a20c04, - 0x202145, - 0x2e03702, - 0x219e8e, - 0x267743, - 0x3bdcc6, - 0x3203f42, - 0x2ed8c7, - 0x244206, - 0x3603802, - 0x2979c3, - 0x235c86, - 0x21d4c8, - 0x29e686, - 0x3aa2c4, + 0x1f42f07, + 0x37f0c9, + 0x2ddc4a, + 0x2ddc4b, + 0x233b83, + 0x236ac5, + 0x2213c82, + 0x3d6204, + 0x2c8983, + 0x231c05, + 0x2601ac2, + 0x367443, + 0x2a2ffc4, + 0x201ac5, + 0x2e06482, + 0x20648e, + 0x25b543, + 0x3b32c6, + 0x3204782, + 0x3e57c7, + 0x23a206, + 0x3603682, + 0x2909c3, + 0x22c386, + 0x2691c8, + 0x295546, + 0x276dc4, 0x3a00b02, - 0x354109, - 0x224ac7, - 0x3804c6, - 0x3704c9, - 0x391a48, - 0x24fcc4, - 0x349446, - 0x3d4146, - 0x3e01902, - 0x2fe906, - 0x218e0f, - 0x3e960e, - 0x311c44, - 0x228685, - 0x339845, - 0x3b1ec9, - 0x24a949, - 0x236487, - 0x207a06, - 0x207943, - 0x4204302, - 0x216183, - 0x2b990a, - 0x4604d03, - 0x347a45, - 0x314102, - 0x3ac849, - 0x4e01d42, - 0x20ad44, - 0x2ffcc6, - 0x258405, - 0x37ce44, - 0x5721084, - 0x205d83, - 0x241004, - 0x5a016c2, - 0x300b84, - 0x5e02084, - 0x35030a, + 0x350889, + 0x21a3c7, + 0x2ff486, + 0x369ac9, + 0x2ca948, + 0x246004, + 0x320146, + 0x3d8b46, + 0x3e01c02, + 0x2fc746, + 0x212d4f, + 0x3d99ce, + 0x2e4804, + 0x20d105, + 0x335fc5, + 0x3a8989, + 0x2427c9, + 0x22cb87, + 0x2239c6, + 0x22edc3, + 0x4216302, + 0x216303, + 0x2a868a, + 0x4615c43, + 0x3456c5, + 0x2f45c2, + 0x3a5c49, + 0x4e028c2, + 0x208844, + 0x3c9a86, + 0x2968c5, + 0x376c04, + 0x570fdc4, + 0x2028c3, + 0x235fc4, + 0x5a01942, + 0x357344, + 0x5e01a04, + 0x214f0a, 0x6200882, - 0x2631c7, - 0x2339c8, - 0x7a0c502, - 0x33e407, - 0x237c44, - 0x36ddc7, - 0x237c45, - 0x387787, - 0x35ca86, - 0x2a1784, - 0x3b9245, - 0x27a907, - 0x9218682, - 0x2fea83, - 0x9620102, - 0x3dabc3, - 0x9a0f202, - 0x289b85, + 0x20bd07, + 0x3be8c8, + 0x7a08b82, + 0x33a387, + 0x22da04, + 0x31b047, + 0x22da05, + 0x380e47, + 0x34d986, + 0x358c84, + 0x36af05, + 0x274707, + 0x9205982, + 0x2b0403, + 0x961f9c2, + 0x3d3583, + 0x9a03602, + 0x254845, 0x9e00202, - 0x37fe84, - 0x23cfc5, - 0x311b87, - 0x30420e, - 0x2ca444, - 0x28fac4, - 0x204243, - 0x324fc9, - 0x2ec50b, - 0x2fd348, - 0x335848, - 0x33b708, - 0x3d6448, - 0xa37030a, - 0x387687, - 0x2542c6, - 0xa61cb82, - 0x378743, - 0x3e0b43, - 0x3e2084, - 0x378783, - 0x397143, - 0x1740a82, - 0xaa02742, - 0x292d05, - 0x35a086, - 0x2aa1c4, - 0x3a9487, - 0x251c86, - 0x2da804, - 0x3c5787, - 0x202743, - 0xb6e7fc2, - 0xba1da82, - 0xbe16c02, - 0x21db46, + 0x3793c4, + 0x3ccb05, + 0x2e4747, + 0x2b294e, + 0x2c3904, + 0x235044, + 0x207843, + 0x301889, + 0x306acb, + 0x391a88, + 0x331f88, + 0x337bc8, + 0x3ceec8, + 0xa36990a, + 0x380d47, + 
0x3f3ac6, + 0xa65a502, + 0x3de703, + 0x3e32c3, + 0x3e4884, + 0x3de743, + 0x354783, + 0x173ec82, + 0xaa08a42, + 0x28b785, + 0x2ac746, + 0x2a29c4, + 0x3a1f47, + 0x237906, + 0x2d7f04, + 0x3bb3c7, + 0x221bc3, + 0xb6e2082, + 0xba69782, + 0xbe16d82, + 0x217b46, 0xc200282, - 0x3676c5, - 0x341fc3, - 0x3dc584, - 0x305544, - 0x305545, - 0x3f2583, - 0xc60bec3, - 0xca0aa82, - 0x20e485, - 0x20e48b, - 0x30f70b, - 0x211d44, - 0x20ed49, - 0x20fdc4, - 0xce10482, - 0x210cc3, - 0x211243, - 0xd211f02, - 0x216f8a, - 0xd602642, - 0x204785, - 0x2f8cca, - 0x24f984, - 0x2139c3, - 0x2153c4, - 0x218043, - 0x218044, - 0x218047, - 0x219285, - 0x21a286, - 0x21ae86, - 0x21cbc3, - 0x2234c8, - 0x215a03, - 0xda03ac2, - 0x365d08, - 0x29e90b, - 0x22c908, - 0x22d286, - 0x22d907, - 0x22f608, + 0x266485, + 0x3401c3, + 0x3d7244, + 0x303a84, + 0x303a85, + 0x3f1d43, + 0xc650b03, + 0xca05a42, + 0x207fc5, + 0x207fcb, + 0x31228b, + 0x206204, + 0x208909, + 0x209544, + 0xce09902, + 0x20a143, + 0x20a6c3, + 0xd20b4c2, + 0x21710a, + 0xd60b782, + 0x3d6485, + 0x2f258a, + 0x245cc4, + 0x20d603, + 0x20e404, + 0x211443, + 0x211444, + 0x211447, + 0x213d45, + 0x214506, + 0x2156c6, + 0x217503, + 0x21b748, + 0x21e083, + 0xda02fc2, + 0x241708, + 0x2957cb, + 0x224788, + 0x225106, + 0x225287, + 0x227b48, 0xf201002, - 0xf629b02, - 0x27f688, - 0x23b007, - 0x213f05, - 0xfb1cc08, - 0xffb11c8, - 0x285f03, - 0x235504, - 0x399d02, - 0x10235982, - 0x1060b282, - 0x10e36f02, - 0x236f03, - 0x11203d82, - 0x315983, - 0x254ac4, - 0x203d83, - 0x24fc84, - 0x25198b, - 0x2063c3, - 0x301806, - 0x26c004, - 0x2d85ce, - 0x3259c5, - 0x279e08, - 0x3bddc7, - 0x3bddca, - 0x23c843, - 0x358587, - 0x2ec6c5, - 0x23c844, - 0x266e86, - 0x266e87, - 0x376204, - 0x1171c644, - 0x3f3784, - 0x251584, - 0x209f46, - 0x213cc3, - 0x213cc8, - 0x21ff08, - 0x2a55c3, - 0x216f43, - 0x34d4c4, - 0x35ee03, - 0x11e06282, - 0x12227e82, - 0x205206, - 0x349543, - 0x27b604, - 0x12611582, - 0x211583, - 0x23d283, - 0x21f9c2, - 0x12a045c2, - 0x2e0c86, - 0x22ad87, - 0x233747, - 0x2fc645, - 0x3b7c84, - 0x2e0085, - 0x2d5187, - 0x3594c9, - 0x3b1646, - 0x2fc546, - 0x13a12f82, - 0x320588, - 0x32c046, - 0x333c45, - 0x317507, - 0x3bb044, - 0x3bb045, - 0x3bf004, - 0x3bf008, - 0x13e02882, - 0x14200482, - 0x38e506, + 0xf620302, + 0x27a748, + 0x3dab47, + 0x31ba45, + 0xfb1ba48, + 0xfedf508, + 0x27d5c3, + 0x22bfc4, + 0x392c82, + 0x1022cdc2, + 0x10668142, + 0x10e2d3c2, + 0x22d3c3, + 0x11201782, + 0x313a43, + 0x24a844, + 0x201783, + 0x245fc4, + 0x23760b, + 0x202f03, + 0x2f9446, + 0x214d84, + 0x2d368e, + 0x2ff905, + 0x273c08, + 0x3b33c7, + 0x3b33ca, + 0x231543, + 0x3b6c47, + 0x306c85, + 0x231544, + 0x25c046, + 0x25c047, + 0x36ff44, + 0x1171b484, + 0x381dc4, + 0x238904, + 0x3c1386, + 0x20f543, + 0x3c1748, + 0x3f2f08, + 0x29dc43, + 0x2170c3, + 0x34a7c4, + 0x35b203, + 0x11e02dc2, + 0x12621942, + 0x202986, + 0x320243, + 0x23a9c4, + 0x12a13282, + 0x213283, + 0x3818c3, + 0x218442, + 0x12e03402, + 0x2d95c6, + 0x22b987, + 0x2ff287, + 0x2f5d45, + 0x3cb8c4, + 0x370c05, + 0x2c9747, + 0x3582c9, + 0x2df986, + 0x2f5c46, + 0x13e04102, + 0x30f188, + 0x32a0c6, + 0x22ad85, + 0x3b1f07, + 0x3b5d04, + 0x3b5d05, + 0x3a24c4, + 0x3a24c8, + 0x14205202, + 0x14600482, + 0x238ac6, 0x200488, - 0x3402c5, - 0x357386, - 0x361608, - 0x3653c8, - 0x14605985, - 0x1722f8c4, - 0x2849c7, - 0x17607e42, - 0x17b6e3c2, - 0x18e0b642, - 0x2ffdc5, - 0x19a9a145, - 0x27a446, - 0x3dafc7, - 0x3e6887, - 0x19e1a043, - 0x34af07, - 0x28f9c8, - 0x27a38d49, - 0x21a047, - 0x239447, - 0x34be48, - 0x239fc6, - 0x23c346, - 0x23e30c, - 0x24000a, - 0x2404c7, - 0x241a0b, - 0x242707, - 
0x24270e, - 0x27e43444, - 0x243544, - 0x245687, - 0x38df47, - 0x249e06, - 0x249e07, - 0x33bfc7, - 0x288583, - 0x28203682, - 0x24c806, - 0x24c80a, - 0x24d24b, - 0x24e8c7, - 0x24f505, - 0x250943, - 0x250cc6, - 0x250cc7, - 0x21d9c3, - 0x28600102, - 0x2512ca, - 0x28b347c2, - 0x28fa8a82, - 0x2924a282, - 0x2963cc82, - 0x254705, - 0x255504, - 0x2a203402, - 0x300c05, - 0x24d843, - 0x3cabc5, - 0x205104, - 0x22f4c4, - 0x2e3a86, - 0x267ac6, - 0x20e683, - 0x3d8a84, - 0x35cd43, - 0x2b2101c2, - 0x22dc84, - 0x22dc86, - 0x25bb85, - 0x2bc906, - 0x317608, - 0x215b84, - 0x3289c8, - 0x38f745, - 0x294ec8, - 0x2de106, - 0x318747, - 0x282a44, - 0x2c682a46, - 0x2ca24dc3, - 0x3ad7c3, - 0x2c5608, - 0x33c744, - 0x2ce15487, - 0x28b246, - 0x2f55c9, - 0x36ea08, - 0x371dc8, - 0x37b1c4, - 0x223403, - 0x2304c2, - 0x2d664842, - 0x2da30902, - 0x329e83, - 0x2de057c2, - 0x21d944, - 0x29ef86, - 0x241543, - 0x2d1ec7, - 0x20cec3, - 0x2ca508, - 0x231645, - 0x2724c3, - 0x23cf45, - 0x23d084, - 0x3bad46, - 0x2366c6, - 0x311ac6, - 0x2e2904, - 0x242ac3, - 0x2e217e02, - 0x2e642185, - 0x200843, + 0x33e305, + 0x353686, + 0x35d788, + 0x361888, + 0x14a02c45, + 0x176204c4, + 0x2576c7, + 0x17a08fc2, + 0x17f547c2, + 0x19202202, + 0x3c9b85, + 0x19ee9e05, + 0x274246, + 0x2dc247, + 0x3e8c07, + 0x1a206643, + 0x321c47, + 0x289a48, + 0x2822e709, + 0x206647, + 0x22ef07, + 0x349208, + 0x22f706, + 0x231046, + 0x23240c, + 0x23324a, + 0x233bc7, + 0x23698b, + 0x237c87, + 0x237c8e, + 0x286391c4, + 0x2392c4, + 0x23b287, + 0x271d87, + 0x240086, + 0x240087, + 0x332dc7, + 0x21dac3, + 0x28a2dd42, + 0x243106, + 0x24310a, + 0x2439cb, + 0x2457c7, + 0x247105, + 0x2473c3, + 0x247746, + 0x247747, + 0x2696c3, + 0x28e00102, + 0x247e0a, + 0x29330c02, + 0x297a1542, + 0x29a41402, + 0x29e31982, + 0x24a485, + 0x24b704, + 0x2aa54302, + 0x3573c5, + 0x231bc3, + 0x374145, + 0x361b84, + 0x226f84, + 0x2dd186, + 0x25cb86, + 0x2081c3, + 0x3d1404, + 0x358fc3, + 0x2ba023c2, + 0x225604, + 0x225606, + 0x24fd45, + 0x399fc6, + 0x3b2008, + 0x21de44, + 0x257208, + 0x3267c5, + 0x28e348, + 0x2d8d86, + 0x2b9b07, + 0x27cf44, + 0x2d67cf46, + 0x2da1a6c3, + 0x3a5603, + 0x371008, + 0x338504, + 0x2de0e4c7, + 0x2862c6, + 0x2f0109, + 0x302208, + 0x375208, + 0x381944, + 0x2180c3, + 0x228b02, + 0x2e656442, + 0x2ea014c2, + 0x328243, 0x2ee060c2, - 0x21c0c3, - 0x300a45, - 0x2f226783, - 0x2fa410c9, - 0x2fe00942, - 0x30612002, - 0x29fe85, - 0x221206, - 0x2bd3c6, - 0x3171c8, - 0x3171cb, - 0x34fb0b, - 0x2fc845, - 0x2e8549, + 0x269644, + 0x295e46, + 0x2328c3, + 0x2cb1c7, + 0x3dc083, + 0x2c39c8, + 0x3816c5, + 0x26aa03, + 0x3cca85, + 0x3ccbc4, + 0x3b1c06, + 0x3b7406, + 0x2e4686, + 0x2db944, + 0x238043, + 0x2f25f042, + 0x2f637105, + 0x200843, + 0x2fe02c02, + 0x20f343, + 0x258c05, + 0x3021f603, + 0x30a36089, + 0x30e00942, + 0x3160b5c2, + 0x299245, + 0x2193c6, + 0x2924c6, + 0x30d788, + 0x30d78b, + 0x34cc8b, + 0x2f5f45, + 0x2e2609, 0x1601082, - 0x2cee08, - 0x20f044, - 0x30e01b42, - 0x346043, - 0x316e82c6, - 0x31a02182, - 0x3d6a48, - 0x31e0a842, - 0x2738ca, - 0x32679783, - 0x32f86046, - 0x31de08, - 0x221b86, - 0x396707, - 0x219007, - 0x3d3cca, - 0x24fa04, - 0x36c404, - 0x385109, - 0x333bd905, - 0x219ec6, - 0x2115c3, - 0x2872c4, - 0x33601d04, - 0x201d07, - 0x33bae987, - 0x264f44, - 0x2424c5, - 0x27a508, - 0x256007, - 0x256287, - 0x33e1b5c2, - 0x2ae1c4, - 0x2a99c8, - 0x259284, - 0x25d444, - 0x25d845, - 0x25d987, - 0x34b68049, - 0x23be84, - 0x25f649, - 0x2607c8, - 0x262104, - 0x262107, - 0x264643, - 0x265cc7, - 0x34e00bc2, - 0x16ccd82, - 0x266946, - 0x2c48c7, - 0x2672c4, - 0x268887, - 0x269387, - 0x26a303, - 
0x352698c2, - 0x215ac2, - 0x26b7c3, - 0x26b7c4, - 0x26b7cb, - 0x335948, - 0x215ac4, - 0x26ca05, - 0x26e147, - 0x2fa885, - 0x34deca, - 0x270903, - 0x35605b02, - 0x2727c4, - 0x274309, - 0x277c03, - 0x277cc7, - 0x244b49, - 0x20e288, - 0x203743, - 0x291e47, - 0x244603, - 0x299704, - 0x29adc9, - 0x29dc86, - 0x32b303, - 0x203f82, - 0x2ccb83, - 0x2ccb87, - 0x39ab45, - 0x3a3c06, - 0x2d1704, - 0x39b945, - 0x2927c3, - 0x21f0c6, - 0x2094c3, - 0x20ef42, - 0x25c904, - 0x35a03942, - 0x35f339c3, - 0x36206582, - 0x243b43, - 0x21b304, - 0x25a807, - 0x2a8606, - 0x201cc2, - 0x36601c82, - 0x372004, - 0x36e181c2, - 0x37203d02, - 0x2af084, - 0x2af085, - 0x20db45, - 0x3d4d06, - 0x37607282, - 0x370985, - 0x3dccc5, - 0x207283, - 0x2feb46, - 0x2140c5, - 0x21dac2, - 0x363205, - 0x21dac4, - 0x222343, - 0x222583, - 0x37a0e002, - 0x27ab07, - 0x26e304, - 0x26e309, - 0x2871c4, - 0x299fc3, - 0x2c97c8, - 0x37e99fc4, - 0x299fc6, - 0x2bc303, - 0x23f783, - 0x20f583, - 0x3830f1c2, - 0x317442, - 0x38600642, - 0x343e08, - 0x2157c8, - 0x3c9706, - 0x3b6e45, - 0x230185, - 0x349707, - 0x38a62fc5, - 0x216e82, - 0x38eaca82, - 0x39200042, - 0x28f288, - 0x3204c5, - 0x309c84, - 0x242e05, - 0x24ad47, - 0x3df244, - 0x25aec2, - 0x3963ff82, - 0x35a8c4, - 0x362607, - 0x2a0407, - 0x387744, - 0x3e1243, - 0x2a5504, - 0x2a5508, - 0x39a3c686, - 0x266d0a, - 0x3af884, - 0x2a9408, - 0x242344, - 0x22da06, - 0x2aca44, - 0x3000c6, - 0x26e5c9, - 0x2bba87, - 0x3a80c3, - 0x39e01742, - 0x2830c3, - 0x210682, - 0x3a209bc2, - 0x257c86, - 0x28adc8, - 0x2bd547, - 0x317a89, - 0x2bd709, - 0x2bf805, - 0x2c0d09, - 0x2c2005, - 0x2c3205, - 0x2c3fc8, - 0x3a616c44, - 0x3aa16c47, - 0x239803, - 0x2c41c7, - 0x239806, - 0x2c4e47, - 0x2bb585, - 0x2390c3, - 0x3ae30fc2, - 0x213c04, - 0x3b20e9c2, - 0x3b607142, - 0x376406, - 0x233945, - 0x2c7e07, - 0x33be43, - 0x3d6e44, - 0x213903, - 0x23ad43, - 0x3ba06502, - 0x3c202502, - 0x399d84, - 0x269883, - 0x30e905, - 0x3c613f82, - 0x3ce04182, - 0x3a0fc6, - 0x304144, - 0x310f44, - 0x310f4a, - 0x3d6005c2, - 0x20f2c3, - 0x22838a, - 0x231a48, - 0x3da5c204, + 0x2e8f88, + 0x203f04, + 0x31e01342, + 0x3441c3, + 0x32671f46, + 0x32a01b02, + 0x3cf4c8, + 0x32e04c02, + 0x26c74a, + 0x336220c3, + 0x33f7f706, + 0x31cec8, + 0x219d46, + 0x38f207, + 0x212f47, + 0x3d86ca, + 0x245d44, + 0x3671c4, + 0x37e709, + 0x343b2f05, + 0x2064c6, + 0x2132c3, + 0x255ec4, + 0x346e2504, + 0x33b487, + 0x34ba6807, + 0x280984, + 0x35dec5, + 0x274308, + 0x24c387, + 0x24c607, + 0x34e0fd02, + 0x31f0c4, + 0x2a21c8, + 0x24e304, + 0x251604, + 0x2519c5, + 0x251b07, + 0x35b51789, + 0x253144, + 0x253e09, + 0x2554c8, + 0x255c44, + 0x255c47, + 0x256243, + 0x256d47, + 0x35e00bc2, + 0x16c5fc2, + 0x25bb06, + 0x2bdd07, + 0x25c384, + 0x25de87, + 0x25f687, + 0x260483, + 0x362596c2, + 0x21e142, + 0x2619c3, + 0x2619c4, + 0x2619cb, + 0x332088, + 0x21e144, + 0x262c05, + 0x264687, + 0x2f3d05, + 0x32920a, + 0x267c83, + 0x36608102, + 0x23e644, + 0x26d209, + 0x270c43, + 0x270d07, + 0x3613c9, + 0x34f6c8, + 0x264d43, + 0x28a7c7, + 0x291103, + 0x292644, + 0x293349, + 0x297786, + 0x2ae103, + 0x208782, + 0x2c5dc3, + 0x2c5dc7, + 0x389d85, + 0x357186, + 0x212804, + 0x395305, + 0x28b243, + 0x217746, + 0x272fc3, + 0x208b02, + 0x250ac4, + 0x36a34382, + 0x36e34383, + 0x372030c2, + 0x20bfc3, + 0x215b44, + 0x252a07, + 0x2a0786, + 0x26d1c2, + 0x3766d602, + 0x3b2204, + 0x37e115c2, + 0x3820c782, + 0x20c784, + 0x20c785, + 0x33c345, + 0x3c3dc6, + 0x38610202, + 0x2fdf45, + 0x3323c5, + 0x2e9d43, + 0x2fc986, + 0x210205, + 0x217ac2, + 0x35e485, + 0x217ac4, + 0x21dd83, + 0x21dfc3, + 0x38a074c2, + 0x274907, + 0x2556c4, + 
0x2556c9, + 0x255dc4, + 0x2b6943, + 0x2c2c88, + 0x38ee9c84, + 0x2e9c86, + 0x2b4843, + 0x263643, + 0x205503, + 0x393034c2, + 0x38c902, + 0x39600642, + 0x341f88, + 0x3d2408, + 0x3c01c6, + 0x29a7c5, + 0x2bb385, + 0x3c7f87, + 0x39a86e45, + 0x2062c2, + 0x39ea4542, + 0x3a200042, + 0x287c08, + 0x30f0c5, + 0x308604, + 0x389605, + 0x394147, + 0x29ee04, + 0x2594c2, + 0x3a6331c2, + 0x356044, + 0x30f447, + 0x2997c7, + 0x380e04, + 0x3e3a43, + 0x29db84, + 0x29db88, + 0x3aa31386, + 0x25beca, + 0x351644, + 0x2a1c08, + 0x2372c4, + 0x225386, + 0x2a4504, + 0x3c9e86, + 0x255989, + 0x2b3fc7, + 0x3a0dc3, + 0x3ae17382, + 0x27e1c3, + 0x209b02, + 0x3b20af02, + 0x254606, + 0x285e48, + 0x2b6687, + 0x35f289, + 0x2b6849, + 0x2b8005, + 0x2b9fc9, + 0x2bb4c5, + 0x2bc045, + 0x2bd508, + 0x3b610084, + 0x3ba10087, + 0x22f2c3, + 0x2bd707, + 0x22f2c6, + 0x2be1c7, + 0x2b3805, + 0x22ea83, + 0x3be29602, + 0x381d04, + 0x3c21fec2, + 0x3c615fc2, + 0x37cd06, + 0x3be845, + 0x2c1107, + 0x2fd603, + 0x354704, + 0x201603, + 0x3be503, + 0x3ca03042, + 0x3d601442, + 0x392d04, + 0x259683, + 0x30d445, + 0x3da04142, + 0x3e206a42, + 0x389806, + 0x2fbf04, + 0x30ecc4, + 0x30ecca, + 0x3ea005c2, + 0x252383, + 0x20ce0a, + 0x20fc88, + 0x3ee503c4, 0x2005c3, - 0x251a83, - 0x2d1fc9, - 0x27cfc9, - 0x231c06, - 0x3de0bc83, - 0x23bb4d, - 0x23dbc6, - 0x2497cb, - 0x3e20b882, - 0x349288, - 0x432235c2, - 0x43603202, - 0x2c6405, - 0x43a040c2, - 0x2b3307, - 0x209a83, - 0x214288, - 0x43e01ac2, - 0x32a7c4, - 0x22cc83, - 0x24d946, - 0x22b9c4, - 0x216f03, - 0x45202382, - 0x2fc7c4, - 0x2cb785, - 0x2cc787, - 0x290943, - 0x2cd143, - 0x1626b42, - 0x2cd803, - 0x2cde03, - 0x45600c02, - 0x279504, - 0x242cc6, - 0x363983, - 0x2ce203, - 0x45a5cf02, - 0x25cf08, - 0x2cf5c4, - 0x2206c6, - 0x393f87, - 0x391d06, - 0x2c5584, - 0x53e01782, - 0x2396cb, - 0x3cdfce, - 0x222f8f, - 0x2e0403, - 0x546dbe82, - 0x164f082, - 0x54a04082, - 0x2b6103, - 0x3cc144, - 0x220243, - 0x359746, - 0x248286, - 0x2b46c7, - 0x390e44, - 0x54e21342, - 0x55232ac2, - 0x309345, - 0x3e4e07, - 0x3cedc6, - 0x5567a6c2, - 0x390744, - 0x2d4783, - 0x55a0af02, - 0x55f82443, - 0x2d5b84, - 0x2dbdc9, - 0x562e3dc2, - 0x5661c4c2, - 0x259605, - 0x56ae42c2, - 0x57203e02, - 0x3698c7, - 0x385c8b, - 0x218dc5, - 0x2682c9, - 0x26f6c6, - 0x57609004, - 0x379289, - 0x377647, - 0x393347, - 0x237b43, - 0x2fe046, - 0x37ea87, - 0x21e803, - 0x2b93c6, - 0x57e16202, - 0x5820b602, - 0x21cd43, - 0x3aca05, - 0x3b14c7, - 0x248386, - 0x39aac5, - 0x26e284, - 0x2ba945, - 0x3141c4, - 0x58609a42, - 0x2e2544, - 0x2d4544, - 0x3a73cd, - 0x2d4549, - 0x260e88, - 0x259884, - 0x3ebb05, - 0x3b80c7, - 0x3d1184, - 0x370c87, - 0x22c4c5, - 0x58abea84, - 0x2be3c5, - 0x58e768c4, - 0x31a606, - 0x3dadc5, - 0x592032c2, - 0x336a83, - 0x251f83, - 0x2459c4, - 0x2459c5, - 0x20d706, - 0x38de05, - 0x26eb44, - 0x59713103, - 0x59a14746, - 0x223d05, - 0x225845, - 0x3daec4, - 0x304c03, - 0x3af90c, - 0x59ecc882, - 0x5a200b42, - 0x5a612a82, - 0x222043, - 0x222044, - 0x5aa0fe02, - 0x302888, - 0x22be04, - 0x3301c6, - 0x5ae24902, - 0x5b216902, - 0x5b603f02, - 0x2a4a45, - 0x300346, - 0x238044, - 0x2361c6, - 0x212506, - 0x26ac03, - 0x5ba9d60a, - 0x299f05, - 0x2b98c3, - 0x213606, - 0x5be13609, - 0x22e147, - 0x3c8548, - 0x391909, - 0x3bfe88, - 0x2a3c46, - 0x20a2c3, - 0x5c205942, - 0x3b0148, - 0x5c6593c2, - 0x5ca00ec2, - 0x2384c3, - 0x2fc3c5, - 0x2b1244, - 0x2bdf89, - 0x23ca84, - 0x255308, - 0x5d20b4c3, - 0x5d66b484, - 0x221248, - 0x5da15142, - 0x23d8c2, - 0x3397c5, - 0x3bba89, - 0x214e43, - 0x32eb84, - 0x3f0484, - 0x2202c3, - 0x29558a, - 0x5df9b302, - 0x5e213a42, - 0x2e7f43, - 0x39d383, 
- 0x161c182, - 0x377f43, - 0x5e62ab02, - 0x5ea01942, - 0x5ee30944, - 0x296ac6, - 0x281884, - 0x28f0c3, - 0x3da783, - 0x5f30c783, - 0x24d5c6, - 0x339b85, - 0x2eba87, - 0x2eb9c6, - 0x2ec108, - 0x2ec306, - 0x201944, - 0x2b2dcb, - 0x2eea43, - 0x2eea45, - 0x5f60ffc2, - 0x369bc2, - 0x5fa54782, - 0x5fe0d3c2, - 0x221383, - 0x60282182, - 0x282183, - 0x2f0343, - 0x60a067c2, - 0x60ef3c06, - 0x2f4005, - 0x2b7346, - 0x6127c082, - 0x61611282, - 0x61a225c2, - 0x61e736c2, - 0x622130c2, - 0x62602202, - 0x21c603, - 0x3a9686, - 0x62a25a04, - 0x3b52c6, - 0x2907c4, - 0x2ec4c3, - 0x63603642, - 0x201f42, - 0x238cc3, - 0x63a14883, - 0x3cec47, - 0x3dacc7, - 0x6965c3c7, - 0x316147, - 0x218943, - 0x69a7a004, - 0x327e04, - 0x327e0a, - 0x3e69c5, - 0x69e31a82, - 0x268843, - 0x6a200602, - 0x262243, - 0x283083, - 0x6aa00582, - 0x28f944, - 0x349904, - 0x3cb645, - 0x32bd85, - 0x311f06, - 0x311186, - 0x6ae18882, - 0x6b2025c2, - 0x2fb405, - 0x2b7052, - 0x326ac6, - 0x22a783, - 0x22a786, - 0x269b85, - 0x1616fc2, - 0x73611f42, - 0x336783, - 0x211f43, - 0x2ac1c3, - 0x73a17a02, - 0x21fe83, - 0x73e07982, - 0x230983, - 0x37b608, - 0x248803, - 0x248806, - 0x3e8187, - 0x3271c6, - 0x3271cb, - 0x290707, - 0x313084, - 0x74600e82, - 0x3a3b45, - 0x74a14843, - 0x247043, - 0x20b8c5, - 0x218843, - 0x75218846, - 0x2ac403, - 0x203584, + 0x237703, + 0x2cb2c9, + 0x26b289, + 0x20fe46, + 0x3f211e43, + 0x32054d, + 0x230886, + 0x247a4b, + 0x3f605cc2, + 0x31ff88, + 0x4421b842, + 0x44602802, + 0x2bfe45, + 0x44a02b82, + 0x2aaac7, + 0x20adc3, + 0x2103c8, + 0x44e04b02, + 0x2bc5c4, + 0x224b03, + 0x2440c6, + 0x230a84, + 0x217083, + 0x46201d02, + 0x2f5ec4, + 0x2c4c45, + 0x2c59c7, + 0x288e83, + 0x2c7003, + 0x16c76c2, + 0x2c76c3, + 0x2c7b43, + 0x46600c02, + 0x221e44, + 0x34d006, + 0x27d843, + 0x2c7fc3, + 0x46a510c2, + 0x2510c8, + 0x2c8c84, + 0x3b6686, + 0x38ca87, + 0x3ae1c6, + 0x370f84, + 0x54e01302, + 0x22f18b, + 0x2c650e, + 0x21b1cf, + 0x3a9cc3, + 0x556d5782, + 0x1646c82, + 0x55a06002, + 0x242443, + 0x3bf3c4, + 0x288983, + 0x358546, + 0x389c06, + 0x3c3087, + 0x244804, + 0x55e19502, + 0x56229d02, + 0x307cc5, + 0x302d47, + 0x3ba846, + 0x566744c2, + 0x389544, + 0x2cda83, + 0x56a06982, + 0x56f7bc03, + 0x2ce904, + 0x2d56c9, + 0x572dd4c2, + 0x57639842, + 0x24e685, + 0x57add802, + 0x58204fc2, + 0x363ec7, + 0x37f34b, + 0x212d05, + 0x248009, + 0x265e06, + 0x5861cd44, + 0x3c58c9, + 0x3e7587, + 0x38be47, + 0x22d903, + 0x2f8406, + 0x325a07, + 0x2721c3, + 0x2c0686, + 0x58e0d9c2, + 0x5922a2c2, + 0x3b7203, + 0x3a5e05, + 0x2df807, + 0x38ffc6, + 0x389d05, + 0x255644, + 0x2b2085, + 0x311944, + 0x59601282, + 0x2db584, + 0x26b184, + 0x26b18d, + 0x2d92c9, + 0x393f88, + 0x201284, + 0x267945, + 0x2ff707, + 0x3c22c4, + 0x2fe247, + 0x226505, + 0x59ab7284, + 0x2ba645, + 0x59e6f904, + 0x318046, + 0x2dc045, + 0x5a2663c2, + 0x22a283, + 0x30cf03, + 0x23b5c4, + 0x23b5c5, + 0x21c2c6, + 0x389e45, + 0x264cc4, + 0x5a700ec3, + 0x5aa10886, + 0x20a8c5, + 0x218f45, + 0x2dc144, + 0x3516c3, + 0x3516cc, + 0x5aec5ac2, + 0x5b200b42, + 0x5b606b42, + 0x20f743, + 0x20f744, + 0x5ba09582, + 0x2fa4c8, + 0x2665c4, + 0x32ea06, + 0x5be1a202, + 0x5c2065c2, + 0x5c605e42, + 0x29d5c5, + 0x3ca106, + 0x35ed44, + 0x22c8c6, + 0x20bac6, + 0x228343, + 0x5ca9748a, + 0x2e9bc5, + 0x2a8643, + 0x225ac6, + 0x5cff3f49, + 0x225ac7, + 0x28f848, + 0x2ca809, + 0x3a3348, + 0x29ca06, + 0x206a83, + 0x5d202042, + 0x3a7ac8, + 0x5d64e442, + 0x5da00ec2, + 0x23ddc3, + 0x2dfa85, + 0x2a7d84, + 0x2bd2c9, + 0x231784, + 0x235ac8, + 0x5e209b43, + 0x5e65f304, + 0x219408, + 0x5eac7f42, + 0x230582, + 0x335f45, + 0x234e09, + 0x206543, + 0x32c584, 
+ 0x3a7f44, + 0x255a83, + 0x28e94a, + 0x5ef94cc2, + 0x5f20d682, + 0x2e2003, + 0x396ec3, + 0x160f402, + 0x3b3083, + 0x5f61cf02, + 0x5fa01502, + 0x5fe28f84, + 0x28f406, + 0x27c704, + 0x287a43, + 0x208483, + 0x6030b843, + 0x243d46, + 0x336305, + 0x2e6947, + 0x2e6886, + 0x2e7588, + 0x2e7786, + 0x220084, + 0x2a9ccb, + 0x2ea443, + 0x2ea445, + 0x606066c2, + 0x3641c2, + 0x60a4a502, + 0x60e03c42, + 0x206e83, + 0x6127d202, + 0x27d203, + 0x2eaf83, + 0x61a03302, + 0x61eee6c6, + 0x2eeac5, + 0x29acc6, + 0x62275a82, + 0x6260a702, + 0x62a1e002, + 0x62e070c2, + 0x6320f8c2, + 0x63601b82, + 0x24b083, + 0x3d3446, + 0x63a94744, + 0x3ac646, + 0x288d04, + 0x301843, + 0x646024c2, + 0x2018c2, + 0x22e683, + 0x64a109c3, + 0x3d3687, + 0x2dbf47, + 0x6aa50587, + 0x314207, + 0x212343, + 0x6ae73e04, + 0x2ecf44, + 0x2ecf4a, + 0x3e8d45, + 0x6b20fcc2, + 0x25de43, + 0x6b600602, + 0x22b643, + 0x27e183, + 0x6be00582, + 0x2899c4, + 0x335904, + 0x3afb45, + 0x3226c5, + 0x22d006, + 0x2b9286, + 0x6c212282, + 0x6c601f42, + 0x2c6d85, + 0x29a9d2, + 0x2ad8c6, + 0x203d43, + 0x3d1f46, + 0x366905, + 0x1617142, + 0x74a0b502, + 0x3baec3, + 0x20b503, + 0x2afb03, + 0x74e03902, + 0x218903, + 0x75216282, + 0x228fc3, + 0x3afdc8, + 0x243503, + 0x243506, + 0x3ea507, + 0x333ac6, + 0x333acb, + 0x288c47, + 0x300e44, + 0x75a00e82, + 0x3570c5, + 0x75e01883, + 0x23c483, + 0x3c52c5, + 0x212243, + 0x76612246, + 0x2b1343, + 0x22c284, 0x2003c6, - 0x334046, - 0x7564acc3, - 0x285c07, - 0x244747, - 0x2b54c5, - 0x24eb06, - 0x21cc03, - 0x782cebc3, - 0x78607002, - 0x78a30704, - 0x3f3589, - 0x21b645, - 0x3af684, - 0x303a08, - 0x23d505, - 0x78e407c5, - 0x24f689, - 0x380583, - 0x3dca84, - 0x792035c2, - 0x221583, - 0x79681142, - 0x281146, - 0x168e5c2, - 0x79a15882, - 0x2a4948, - 0x2a54c3, - 0x2be307, - 0x39fc85, - 0x2fddc5, - 0x331a0b, - 0x2fddc6, - 0x331c06, - 0x24bb04, - 0x218386, - 0x79f00dc8, - 0x26c0c3, - 0x274003, - 0x274004, - 0x3e4d44, - 0x304e07, - 0x31ea05, - 0x7a324e02, - 0x7a608782, - 0x7ae08785, - 0x2e6ac4, - 0x2e6acb, - 0x305448, - 0x268f84, - 0x7b25cf42, - 0x7b609ec2, - 0x209ec3, - 0x306744, - 0x306a05, - 0x307387, - 0x7bb097c4, - 0x21bd84, - 0x7be05442, - 0x38a3c9, - 0x30a905, - 0x219085, - 0x30b185, - 0x7c205443, - 0x244e04, - 0x244e0b, - 0x30be44, - 0x30c10b, - 0x30c6c5, - 0x2230ca, - 0x30ce88, - 0x30d08a, - 0x30d883, - 0x30d88a, - 0x7ca1b482, - 0x7ce21e42, - 0x7d229a83, - 0x7d6cc442, - 0x310c43, - 0x7db129c2, - 0x7df42742, - 0x313e44, - 0x223606, - 0x235f05, - 0x317483, - 0x23b886, - 0x212c85, - 0x235804, - 0x7e200902, - 0x21d784, - 0x2e81ca, - 0x2cb0c7, - 0x351e86, - 0x2516c7, - 0x24c843, - 0x2d5bc8, - 0x34c64b, - 0x226445, - 0x3f05c5, - 0x3f05c6, - 0x304704, - 0x236d08, - 0x2221c3, - 0x28b8c4, - 0x3d4047, - 0x312cc6, - 0x346906, - 0x2d840a, - 0x25f6c4, - 0x2a1a8a, - 0x7e6f7b86, - 0x2f7b87, - 0x26ca87, - 0x28db44, - 0x28db49, - 0x230dc5, - 0x377483, - 0x236883, - 0x7ea06f83, - 0x234344, - 0x7ee00682, - 0x366906, - 0x7f2d9445, - 0x22a9c5, - 0x246646, - 0x215084, - 0x7f601a42, - 0x246744, - 0x7fe03b02, - 0x379105, - 0x230f44, - 0x8122ea03, - 0x81611f82, - 0x211f83, - 0x3bb206, - 0x81a02fc2, - 0x339288, - 0x22dfc4, - 0x22dfc6, - 0x39dc06, - 0x81e6e204, - 0x220f05, - 0x22f7c8, - 0x230007, - 0x286287, - 0x28628f, - 0x2a98c6, - 0x247943, - 0x249f84, - 0x234c03, - 0x22db44, - 0x3a23c4, - 0x8220b242, - 0x2625c3, - 0x33fc03, - 0x82606382, + 0x3dd9c6, + 0x76a1f143, + 0x3545c7, + 0x360fc7, + 0x2abc05, + 0x329dc6, + 0x20a903, + 0x796c88c3, + 0x79a06702, + 0x79e28d44, + 0x3f2d09, + 0x222b85, + 0x23d9c4, + 0x2fb7c8, + 0x245ac5, + 0x7a247285, + 0x260fc9, + 
0x2ff543, + 0x3d7744, + 0x7a6020c2, + 0x219743, + 0x7aa795c2, + 0x2795c6, + 0x1686f42, + 0x7ae06fc2, + 0x29d4c8, + 0x29db43, + 0x2ba587, + 0x333d45, + 0x2cc285, + 0x2cc28b, + 0x2f8186, + 0x2cc486, + 0x244f04, + 0x211786, + 0x7b2f8a08, + 0x2622c3, + 0x267103, + 0x267104, + 0x302c84, + 0x30e087, + 0x341845, + 0x7b768e82, + 0x7ba04f82, + 0x7c204f85, + 0x2d23c4, + 0x2e32cb, + 0x303988, + 0x271c84, + 0x7c634dc2, + 0x7ca71c02, + 0x373dc3, + 0x304c84, + 0x304f45, + 0x3058c7, + 0x7cf08144, + 0x20f004, + 0x7d202b02, + 0x383b89, + 0x3096c5, + 0x212fc5, + 0x30a245, + 0x7d619683, + 0x23ab84, + 0x23ab8b, + 0x30af04, + 0x30b1cb, + 0x30b785, + 0x21b30a, + 0x30bec8, + 0x30c0ca, + 0x30c943, + 0x30c94a, + 0x7de15cc2, + 0x7e21a002, + 0x7e620283, + 0x7eb0e9c2, + 0x30e9c3, + 0x7ef104c2, + 0x7f340942, + 0x3115c4, + 0x21b886, + 0x22c605, + 0x3db3c6, + 0x3c1f05, + 0x30f784, + 0x7f600902, + 0x269484, + 0x2e228a, + 0x2c4587, + 0x3be686, + 0x237347, + 0x243143, + 0x2ce948, + 0x3ed24b, + 0x2d61c5, + 0x21d505, + 0x21d506, + 0x3a8084, + 0x3b7a48, + 0x214143, + 0x2a7e84, + 0x3d8a47, + 0x300a86, + 0x3e2106, + 0x2d34ca, + 0x23d704, + 0x23d70a, + 0x7fb70486, + 0x370487, + 0x262c87, + 0x267784, + 0x267789, + 0x229405, + 0x3e7503, + 0x20c4c3, + 0x7fe22b03, + 0x80200682, + 0x239ac6, + 0x806d7105, + 0x3d2185, + 0x236746, + 0x2c7e84, + 0x80a12482, + 0x236844, + 0x81210002, + 0x3c5745, + 0x229584, + 0x82627103, + 0x82a0b542, + 0x20b543, + 0x3b5ec6, + 0x82e04842, + 0x39ac48, + 0x225944, + 0x225946, + 0x33ca86, + 0x83264744, + 0x20e905, + 0x2203c8, + 0x225c47, + 0x228087, + 0x22808f, + 0x2a20c6, + 0x23ae03, + 0x23f044, + 0x227543, + 0x2254c4, + 0x382e44, + 0x8363f602, + 0x2a0f03, + 0x33d7c3, + 0x83a02ec2, + 0x202ec3, + 0x269703, + 0x213dca, + 0x31bc07, + 0x3a60cc, + 0x3a6386, + 0x251e86, + 0x259307, + 0x83e5d447, + 0x263789, + 0x84241844, + 0x84a06ec2, + 0x84e01042, + 0x2d3886, + 0x3543c4, + 0x2d4746, + 0x26abc8, + 0x3a5ec4, + 0x33da06, + 0x292485, + 0x8567e608, + 0x247843, + 0x282245, + 0x285c83, + 0x2130c3, + 0x2130c4, + 0x26b683, + 0x85a51502, + 0x85e00e02, + 0x3e73c9, + 0x28cb45, + 0x28cec4, + 0x298ac5, + 0x203544, + 0x2e6f07, + 0x35ea45, + 0x8661bc04, + 0x2f9f48, + 0x2c9bc6, + 0x2cf104, + 0x2cff48, + 0x86a01a42, + 0x2e3184, + 0x31c344, + 0x351387, + 0x86e04ac4, + 0x201cc2, + 0x87210a82, + 0x24e583, + 0x24e584, + 0x239803, + 0x38f6c5, + 0x87655182, + 0x2f4a85, + 0x27ccc2, + 0x317585, + 0x2e1085, + 0x87a03d02, + 0x381844, + 0x87e03c82, + 0x3e49c6, + 0x2d7c06, + 0x234f48, + 0x296048, + 0x37cc84, + 0x2f8bc5, + 0x8822a9c9, + 0x2e90c4, + 0x3ef104, + 0x2776c3, + 0x20e7c3, + 0x8860e7c5, + 0x275485, + 0x2e9f04, + 0x2b26c2, + 0x3315c3, + 0x88a02e82, + 0x88e01982, + 0x39a705, + 0x285b07, + 0x283d44, + 0x2caa09, + 0x2e23c9, + 0x202183, + 0x286d88, + 0x2a8c49, + 0x222607, + 0x8933d845, + 0x359b86, + 0x35b2c6, + 0x35c0c5, + 0x2d93c5, + 0x89605682, + 0x259205, + 0x2d8f88, + 0x2d5fc6, + 0x89b0b9c7, + 0x3a6744, + 0x371587, + 0x3b1106, + 0x89e0de02, + 0x21bfc6, + 0x317485, + 0x8a2429c2, + 0x8a618b82, + 0x27aec6, + 0x8aa99987, + 0x8ae38742, + 0x21a043, + 0x23e186, + 0x2d8e44, + 0x269c46, + 0x341606, + 0x2fdb0a, + 0x350145, + 0x21ef46, + 0x21f983, + 0x21f984, + 0x8b2021c2, + 0x32a083, + 0x8b60f782, + 0x333883, + 0x8ba0d084, + 0x2dfbc4, + 0x8bedfbca, 0x206383, - 0x21da03, - 0x21930a, - 0x31cdc7, - 0x3ae24c, - 0x3ae506, - 0x25dd06, - 0x269507, - 0x82a6ffc7, - 0x272589, - 0x82f65e44, - 0x836213c2, - 0x83a01042, - 0x2d87c6, - 0x285a04, - 0x2db286, - 0x277f48, - 0x3acac4, - 0x33a6c6, - 0x2bd385, - 0x842833c8, - 0x250dc3, - 0x287385, - 0x28d283, - 
0x219183, - 0x219184, - 0x272983, - 0x846291c2, - 0x84a00e02, - 0x377349, - 0x2936c5, - 0x293a44, - 0x29f705, - 0x20f144, - 0x2eadc7, - 0x3637c5, - 0x85223984, - 0x302308, - 0x2cf686, - 0x2d4a04, - 0x2d4a08, - 0x85601a82, - 0x2dc104, - 0x31d504, - 0x354fc7, - 0x85a01a84, - 0x202342, - 0x85e14942, - 0x220203, - 0x259504, - 0x243a83, - 0x396bc5, - 0x8621e7c2, - 0x2f93c5, - 0x2827c2, - 0x319cc5, - 0x2e6805, - 0x8662c602, - 0x23d204, - 0x86a075c2, - 0x3e21c6, - 0x212f06, - 0x3bbbc8, - 0x29c8c8, - 0x376384, - 0x300f85, - 0x312649, - 0x2cef44, - 0x3effc4, - 0x27c243, - 0x289f03, - 0x86e89f05, - 0x26ed05, - 0x29a244, - 0x2a7382, - 0x334a03, - 0x87206342, - 0x87601b82, - 0x338d45, - 0x28d107, - 0x20d944, - 0x391b09, - 0x2e8309, - 0x28e403, - 0x28e408, - 0x2ab4c9, - 0x3d3b47, - 0x87b3a505, - 0x35d3c6, - 0x35da06, - 0x35eec5, - 0x2d4645, - 0x87e02802, - 0x202805, - 0x2e0788, - 0x2e1306, - 0x8830c907, - 0x3ae8c4, - 0x2df487, - 0x3a8246, - 0x88609202, - 0x20d406, - 0x31934a, - 0x319bc5, - 0x88a15982, - 0x88e37dc2, - 0x27fe06, - 0x892a05c7, - 0x89603382, - 0x221e83, - 0x3f38c6, - 0x2de1c4, - 0x253846, - 0x324bc6, - 0x20184a, - 0x336e05, - 0x3c7b06, - 0x3c8703, - 0x3c8704, - 0x89a05e02, - 0x32c003, - 0x89e22082, - 0x32bc83, - 0x8a228604, - 0x2dfc04, - 0x8a7b188a, - 0x219d83, - 0x387c47, - 0x3de4c6, - 0x328b44, - 0x2372c2, - 0x2184c2, - 0x8aa007c2, - 0x2ef983, - 0x26c847, + 0x2096c7, + 0x366c46, + 0x3888c4, + 0x22cec2, + 0x2298c2, + 0x8c2007c2, + 0x30fc43, + 0x262a47, 0x2007c7, - 0x29b784, - 0x220d87, - 0x307486, - 0x23b147, - 0x21dc44, - 0x223f05, - 0x2031c5, - 0x8ae09b02, - 0x226b06, - 0x22b203, - 0x231282, - 0x231286, - 0x8b229b42, - 0x8b649902, - 0x254905, - 0x8ba021c2, - 0x8be019c2, - 0x398c45, - 0x2eed85, - 0x313b85, - 0x8c673143, - 0x2f2505, - 0x2fde87, - 0x2bb945, - 0x336fc5, - 0x279f04, - 0x240646, - 0x253004, - 0x8ca008c2, - 0x8d6d0e85, - 0x215e07, - 0x203788, - 0x27b8c6, - 0x27b8cd, - 0x27cd89, - 0x27cd92, - 0x33c805, - 0x3465c3, - 0x8da0ea02, - 0x3247c4, - 0x23dc43, - 0x38a6c5, - 0x31a945, - 0x8de2ccc2, - 0x272503, - 0x8e25c442, - 0x8ea2b942, - 0x8ee00082, - 0x3f4005, - 0x3a81c3, - 0x8f20dfc2, - 0x8f60ba42, - 0x28f906, - 0x26b54a, - 0x22a743, - 0x245943, - 0x2ed243, - 0x91203342, - 0x9fa096c2, - 0xa0203b42, - 0x207882, - 0x334809, - 0x2e31c4, - 0x283dc8, - 0xa0627e42, - 0xa0e01102, - 0x271dc5, - 0x241e48, - 0x24d048, - 0x2f29cc, - 0x246143, - 0xa1276282, - 0xa160b382, - 0x2dac86, - 0x31be45, - 0x2f4e83, - 0x26aa06, - 0x31bf86, - 0x251a43, - 0x31d443, - 0x31d886, - 0x31f304, - 0x2637c6, - 0x3afc04, - 0x31f9c4, - 0x32134a, - 0xa1a561c2, - 0x2647c5, - 0x32290a, - 0x322845, - 0x323904, - 0x323a06, - 0x323b84, - 0x221846, - 0xa1e02402, - 0x21c7c6, - 0x325785, - 0x3c7987, - 0x3d0286, - 0x269704, - 0x2f5107, - 0x20d445, - 0x2477c7, - 0x22abc7, - 0x22abce, - 0x290106, - 0x248b85, - 0x2080c7, - 0x3dcb47, - 0x212105, - 0x2152c4, - 0x333502, - 0x28ac87, - 0x28d304, - 0x260644, - 0x2d568b, - 0xa22270c3, - 0x30dcc7, - 0x2270c4, - 0x30dfc7, - 0x224303, - 0x35680d, - 0x328848, - 0xa2656bc4, - 0x256bc5, - 0x3e1685, - 0x329083, - 0xa2a2dec2, - 0x32bfc3, - 0x32c803, - 0x2159c4, - 0x226c85, - 0x226d87, - 0x3c8786, - 0x39b403, - 0x2313cb, - 0x377b8b, - 0x2b87cb, - 0x2c144b, - 0x2c1d8a, - 0x3cce4b, - 0x3012cb, - 0x31c20c, - 0x31fdcb, - 0x36a851, - 0x37e70a, - 0x3c120b, - 0x3f1dcc, - 0x32d34b, - 0x32e2ca, - 0x32e94a, - 0x32fd0e, - 0x33034b, - 0x33060a, - 0x331d91, - 0x3321ca, - 0x3326cb, - 0x332c0e, - 0x334acc, - 0x334f4b, - 0x33520e, - 0x33558c, - 0x33750a, - 0x33870c, - 0xa2f3948a, - 0x339cc8, - 0x33a989, - 
0x33c9ca, - 0x33cc4a, - 0x33cecb, - 0x33f38e, - 0x33fed1, - 0x34a749, - 0x34a98a, - 0x34b6cb, - 0x34cd4d, - 0x34dbca, - 0x34e396, - 0x34f70b, - 0x350b8a, - 0x3514ca, - 0x35250b, - 0x353f89, - 0x357189, - 0x358b0d, - 0x35994b, - 0x35b40b, - 0x35b8c9, - 0x35bf0e, - 0x35c54a, - 0x35d18a, - 0x35d7ca, - 0x35decb, - 0x35e70b, - 0x35f98d, - 0x36130d, - 0x362e90, - 0x36334b, - 0x363e4c, - 0x36514b, - 0x3693cb, - 0x36ce0e, - 0x36e0cb, - 0x36e0cd, - 0x3745cb, - 0x37504f, - 0x37540b, - 0x375dca, - 0x377889, - 0x37a2c9, - 0xa337a64b, - 0x37a90e, - 0x37ac8e, - 0x37c54b, - 0x37d2cf, - 0x38078b, - 0x380a4b, - 0x380d0a, - 0x385889, - 0x38820f, - 0x38d40c, - 0x38d9cc, - 0x38e94e, - 0x38ef8f, - 0x38f34e, - 0x38fd10, - 0x39010f, - 0x39110e, - 0x391fcc, - 0x3922d1, - 0x392712, - 0x393b11, - 0x39414e, - 0x39498b, - 0x39498e, - 0x394d0f, - 0x3950ce, - 0x395450, - 0x395853, - 0x395d11, - 0x39614c, - 0x39644e, - 0x3968cc, - 0x396d13, - 0x3977d0, - 0x397c4c, - 0x397f4c, - 0x39880b, - 0x39998e, - 0x399e8b, - 0x39a5cb, - 0x39bc8c, - 0x3a280a, - 0x3a2c4c, - 0x3a2f4c, - 0x3a3249, - 0x3a560b, - 0x3a58c8, - 0x3a6089, - 0x3a608f, - 0x3a7acb, - 0xa37a890a, - 0x3ab34c, - 0x3ac28b, - 0xa3bac549, - 0x3ad1c8, - 0x3ad58b, - 0x3aee0a, - 0x3af08a, - 0x3af30b, - 0x3afecc, - 0x3b1b09, - 0x3b1d48, - 0x3b464b, - 0x3b6f8b, - 0x3bcd0e, - 0x3be20b, - 0x3c0b8b, - 0x3cc50b, - 0x3cc7c9, - 0x3cd3cd, - 0x3dfeca, - 0x3e3357, - 0x3e4398, - 0x3e6b89, - 0x3e7dcb, - 0x3ed6d4, - 0x3edbcb, - 0x3ee14a, - 0x3eed0a, - 0x3eef8b, - 0x3ef6d0, - 0x3efad1, - 0x3f00ca, - 0x3f13cd, - 0x3f1acd, - 0x3f328b, - 0x226c03, - 0xa3e05503, - 0x21a846, - 0x209545, - 0x2f0b07, - 0x2c6b06, - 0xa420b2c2, - 0x2c0209, - 0x23b684, - 0x2fcdc8, - 0x206ec3, - 0x324707, - 0xa4603482, - 0x2c7e43, - 0xa4a079c2, - 0x2e8dc6, - 0x2e9b84, - 0x230ac4, - 0x204e83, - 0xa52e4302, - 0xa562fd04, - 0x28da87, - 0xa5a371c2, - 0x21a043, - 0x358c3, - 0x226783, - 0x20f583, - 0x214e43, - 0x214883, - 0x24acc3, - 0x113508, - 0x208c43, + 0x295284, + 0x230147, + 0x3059c6, + 0x3dac87, + 0x217c44, + 0x21c505, + 0x210785, + 0x8c60ae42, + 0x361dc6, + 0x2309c3, + 0x231d02, + 0x231d06, + 0x8ca20342, + 0x8ce3d942, + 0x24a685, + 0x8d201b42, + 0x8d60c642, + 0x8df925c5, + 0x2e3e85, + 0x311305, + 0x8e26bfc3, + 0x2d9e05, + 0x2f8247, + 0x2b6cc5, + 0x350305, + 0x273d04, + 0x245946, + 0x254f84, + 0x8e6008c2, + 0x8f2b5585, + 0x37b547, + 0x2f8788, + 0x28e506, + 0x28e50d, + 0x28fa09, + 0x28fa12, + 0x387e05, + 0x391543, + 0x8f609a02, + 0x324704, + 0x230903, + 0x318785, + 0x319345, + 0x8fa24b42, + 0x26aa43, + 0x8fe50602, + 0x90624302, + 0x90a00082, + 0x3ee585, + 0x3a0ec3, + 0x90e07482, + 0x91205fc2, + 0x289986, + 0x277a0a, + 0x2056c3, + 0x23b543, + 0x2f0ac3, + 0x92e02642, + 0xa1641d82, + 0xa1e18182, + 0x2046c2, + 0x330c49, + 0x2dc8c4, + 0x3a0208, + 0xa2221902, + 0xa2a01102, + 0x282145, + 0x236dc8, + 0x32b148, + 0x2f0d4c, + 0x23ba43, + 0xa2e6f2c2, + 0xa320c302, + 0x2d4146, + 0x31a605, + 0x2ef943, + 0x273706, + 0x31a746, + 0x2376c3, + 0x31c283, + 0x31c946, + 0x31de04, + 0x20c306, + 0x3ec744, + 0x31e5c4, + 0x320bca, + 0xa364c542, + 0x2563c5, + 0x3229ca, + 0x322905, + 0x3236c4, + 0x3237c6, + 0x323944, + 0x219a06, + 0xa3a01d82, + 0x39e8c6, + 0x302045, + 0x3bd5c7, + 0x3c9246, + 0x259504, + 0x2efc47, + 0x21c005, + 0x25d2c7, + 0x22b7c7, + 0x22b7ce, + 0x288646, + 0x243885, + 0x204a07, + 0x3c2c87, + 0x20b6c5, + 0x214404, + 0x244b82, + 0x285d07, + 0x293244, + 0x24cf44, + 0x2e78cb, + 0xa3e20b83, + 0x326f07, + 0x220b84, + 0x327207, + 0x21c903, + 0x352b0d, + 0x326648, + 0xa424d404, + 0x24d405, + 0x3e3e85, + 0x326e83, + 0xa4625842, + 
0x32a043, + 0x32ae03, + 0x21e044, + 0x361f45, + 0x362047, + 0x21fa06, + 0x394dc3, + 0x233e8b, + 0x3727cb, + 0x2aeccb, + 0x2badcb, + 0x2c78ca, + 0x2d594b, + 0x2f8f0b, + 0x3274cc, + 0x31e9cb, + 0x36534a, + 0x39c74b, + 0x3b558c, + 0x3f130b, + 0x32b74a, + 0x32c34a, + 0x32d68e, + 0x32de0b, + 0x32e0ca, + 0x32f191, + 0x32f5ca, + 0x32facb, + 0x33000e, + 0x33130c, + 0x33168b, + 0x33194e, + 0x331ccc, + 0x33324a, + 0x33500c, + 0xa4b35c0a, + 0x336448, + 0x336e49, + 0x33894a, + 0x338bca, + 0x338e4b, + 0x33cf4e, + 0x33df11, + 0x348109, + 0x34834a, + 0x348a8b, + 0x34a04d, + 0x34aeca, + 0x34b516, + 0x34c88b, + 0x34e18a, + 0x34e9ca, + 0x34f8cb, + 0x350709, + 0x353489, + 0x354a4d, + 0x35520b, + 0x356b8b, + 0x357509, + 0x357b4e, + 0x35874a, + 0x35940a, + 0x35994a, + 0x35a2cb, + 0x35ab0b, + 0x35b8cd, + 0x35d48d, + 0x35e110, + 0x35e5cb, + 0x35fc4c, + 0x36160b, + 0x3639cb, + 0x367bce, + 0x3682cb, + 0x3682cd, + 0x36e30b, + 0x36ed8f, + 0x36f14b, + 0x36fb0a, + 0x3724c9, + 0x374309, + 0xa4f7468b, + 0x37494e, + 0x374cce, + 0x37638b, + 0x37708f, + 0x379b0b, + 0x379dcb, + 0x37a08a, + 0x37ef49, + 0x38280f, + 0x386b0c, + 0x38748c, + 0x387ace, + 0x387fcf, + 0x38838e, + 0x388b10, + 0x388f0f, + 0x38a00e, + 0x38ab4c, + 0x38ae51, + 0x38b292, + 0x38c611, + 0x38cc4e, + 0x38d48b, + 0x38d48e, + 0x38d80f, + 0x38dbce, + 0x38df50, + 0x38e353, + 0x38e811, + 0x38ec4c, + 0x38ef4e, + 0x38f3cc, + 0x38f813, + 0x390990, + 0x390e0c, + 0x39110c, + 0x39218b, + 0x39290e, + 0x392e0b, + 0x39354b, + 0x39564c, + 0x39b18a, + 0x39bf4c, + 0x39c24c, + 0x39c549, + 0x39e04b, + 0x39e308, + 0x39eec9, + 0x39eecf, + 0x3a07cb, + 0xa53a13ca, + 0x3a360c, + 0x3a454b, + 0xa57a4809, + 0x3a5008, + 0x3a53cb, + 0x3a6c8a, + 0x3a6f0a, + 0x3a718b, + 0x3a784c, + 0x3a85c9, + 0x3a8808, + 0x3ab9cb, + 0x3ae48b, + 0x3b230e, + 0x3b380b, + 0x3b4f0b, + 0x3c698b, + 0x3c6c49, + 0x3c714d, + 0x3e264a, + 0x3e6257, + 0x3e6a98, + 0x3e8f09, + 0x3ea14b, + 0x3eb314, + 0x3eb80b, + 0x3ebd8a, + 0x3eca0a, + 0x3ecc8b, + 0x3ee810, + 0x3eec11, + 0x3ef20a, + 0x3f090d, + 0x3f100d, + 0x3f2a0b, + 0x361ec3, + 0xa5bd5603, + 0x27d646, + 0x286845, + 0x2eb907, + 0x2de506, + 0xa5e3c402, + 0x2b8a09, + 0x3db1c4, + 0x2f64c8, + 0x222a43, + 0x324647, + 0xa62428c2, + 0x2c1143, + 0xa6603642, + 0x2e2ec6, + 0x2e5184, + 0x229104, + 0x3d6b83, + 0xa6edd842, + 0xa7201844, + 0x2676c7, + 0xa762c082, + 0x206643, + 0x2cd03, + 0x21f603, + 0x205503, + 0x206543, + 0x2109c3, + 0x21f143, + 0x11c748, + 0x21d783, 0x2000c2, - 0x1c3448, - 0x20b642, - 0x20f583, - 0x214e43, - 0x214883, - 0x24acc3, - 0x219303, - 0x345796, - 0x373113, - 0x220c09, - 0x2848c8, - 0x3a39c9, - 0x322a86, - 0x35a910, - 0x3d24d3, - 0x312d88, - 0x291087, - 0x29ba47, - 0x2ba68a, - 0x3b95c9, - 0x336809, - 0x265a0b, - 0x35ca86, - 0x335a4a, - 0x22d286, - 0x23a103, - 0x27aa45, - 0x213cc8, - 0x29464d, - 0x2ffe8c, - 0x325447, - 0x3bc70d, - 0x22f8c4, - 0x23e08a, - 0x23fb4a, - 0x24000a, - 0x2ae887, - 0x249c47, - 0x24e084, - 0x282a46, - 0x325944, - 0x226788, - 0x23cac9, - 0x3171c6, - 0x3171c8, - 0x25248d, - 0x2e8549, - 0x31de08, - 0x219007, - 0x254b4a, - 0x2c48c6, - 0x3766c4, - 0x212747, - 0x366b0a, - 0x3ee84e, - 0x262fc5, - 0x2a014b, - 0x2ef6c9, - 0x27cfc9, - 0x2098c7, - 0x2098ca, - 0x36df07, - 0x3ce109, - 0x233e08, - 0x20bc0b, - 0x2fc3c5, - 0x260d4a, - 0x222389, - 0x370e0a, - 0x20700b, - 0x21264b, - 0x265795, - 0x2dbfc5, - 0x219085, - 0x244e0a, - 0x27468a, - 0x36f207, - 0x2190c3, - 0x2d8748, - 0x2f2d4a, - 0x22dfc6, - 0x271ec9, - 0x2833c8, - 0x2d4a04, - 0x28e189, - 0x29c8c8, - 0x2de047, - 0x2d0e86, - 0x215e07, - 0x2cfb07, - 0x24d3c5, - 0x262e0c, - 0x256bc5, - 
0x21a043, - 0x226783, - 0x20f583, - 0x214883, - 0x24acc3, - 0x20b642, - 0x21a043, - 0x214883, - 0x208c43, - 0x24acc3, - 0x21a043, - 0x214883, - 0x248803, - 0x24acc3, - 0x1e0283, - 0x1c3448, - 0x21a043, - 0x226783, - 0x20f583, - 0x214e43, - 0x214883, - 0x24acc3, - 0x1c3448, - 0x20b642, - 0x21a043, - 0x238b87, - 0x33f84, - 0x214883, - 0x3b284, - 0x24acc3, - 0x12c85, - 0x20b642, - 0x203142, - 0x313002, - 0x201ac2, - 0x203cc2, - 0x20ea82, - 0x1874a, - 0x12c105, - 0x12c10a, - 0x152aa49, - 0x14bd4b, - 0x5f887, - 0x1bc3c6, - 0xa5286, - 0x67409, - 0x12b1c7, - 0x3504, - 0x14a150a, - 0x1540e, - 0x134309, - 0x482ea03, - 0x9e747, - 0x152986, + 0x1b9688, + 0x202202, + 0x205503, + 0x206543, + 0x2109c3, + 0x21f143, + 0x213dc3, + 0x343916, + 0x36c653, + 0x22ffc9, + 0x2575c8, + 0x356f49, + 0x322b46, + 0x356090, + 0x3ed4d3, + 0x300b48, + 0x289647, + 0x293c47, + 0x2b1dca, + 0x36b289, + 0x3d3dc9, + 0x25364b, + 0x34d986, + 0x33218a, + 0x225106, + 0x22f843, + 0x274845, + 0x3c1748, + 0x28dacd, + 0x3c9c4c, + 0x301d07, + 0x31ec4d, + 0x2204c4, + 0x23218a, + 0x232d8a, + 0x23324a, + 0x31f787, + 0x23fec7, + 0x244ac4, + 0x27cf46, + 0x2ff884, + 0x21f608, + 0x2317c9, + 0x30d786, + 0x30d788, + 0x24848d, + 0x2e2609, + 0x31cec8, + 0x212f47, + 0x24a8ca, + 0x2bdd06, + 0x37cfc4, + 0x21dc07, + 0x239cca, + 0x23f70e, + 0x286e45, + 0x29950b, + 0x30f989, + 0x26b289, + 0x20ac07, + 0x20ac0a, + 0x31b187, + 0x2c6649, + 0x3eaa48, + 0x37360b, + 0x2dfa85, + 0x393e4a, + 0x21ddc9, + 0x2fe3ca, + 0x215e8b, + 0x21db0b, + 0x2533d5, + 0x2f6985, + 0x212fc5, + 0x23ab8a, + 0x2722ca, + 0x3107c7, + 0x213003, + 0x2d3808, + 0x2ed6ca, + 0x225946, + 0x25f809, + 0x27e608, + 0x2cf104, + 0x286b09, + 0x296048, + 0x2d8cc7, + 0x2b5586, + 0x37b547, + 0x2ca047, + 0x243b45, + 0x2a174c, + 0x24d405, + 0x206643, + 0x21f603, + 0x205503, + 0x2109c3, + 0x21f143, + 0x202202, + 0x206643, + 0x2109c3, + 0x21d783, + 0x21f143, + 0x206643, + 0x2109c3, + 0x243503, + 0x21f143, + 0x1e2a03, + 0x1b9688, + 0x206643, + 0x21f603, + 0x205503, + 0x206543, + 0x2109c3, + 0x21f143, + 0x1b9688, + 0x202202, + 0x206643, + 0x22e547, + 0x2b044, + 0x2109c3, + 0x1dadc4, + 0x21f143, + 0x1c1f05, + 0x202202, + 0x201482, + 0x300dc2, + 0x204b02, + 0x206282, + 0x2061c2, + 0x1214a, + 0x12a185, + 0x12a18a, + 0x1528d09, + 0x14910b, + 0x54047, + 0x1b1786, + 0x9d286, + 0x5c4c9, + 0xadfc7, + 0xf8504, + 0x15adf8a, + 0xe44e, + 0x18150c, + 0x1ddc89, + 0x4827103, + 0x95607, + 0x1106, 0xf83, - 0x127dc5, + 0xecf05, 0xc1, - 0x521a043, - 0x226783, - 0x204703, - 0x20f583, - 0x20bc83, - 0x214e43, - 0x2f4006, - 0x214883, - 0x24acc3, - 0x23ae43, - 0x1c3448, + 0x221bc3, + 0x5206643, + 0x2392c4, + 0x21f603, + 0x3d6403, + 0x205503, + 0x211e43, + 0x206543, + 0x2eeac6, + 0x29acc6, + 0x2109c3, + 0x21f143, + 0x2b6006, + 0x236e83, + 0x1b9688, 0x200984, - 0x26b247, - 0x204ec3, - 0x271984, - 0x206f43, - 0x209943, - 0x20f583, - 0xf5d87, - 0x1bfd04, - 0x2d03, - 0x1bee85, + 0x25f0c7, + 0x3d6bc3, + 0x291904, + 0x20aa83, + 0x20ac83, + 0x205503, + 0xf08c7, + 0x1a31c4, + 0x1d45c3, + 0x1a2345, 0x66000c2, - 0xbec3, - 0x6a0b642, - 0x6e99909, - 0x709f509, - 0x9fa0d, - 0x9fd4d, - 0x313002, - 0x5c204, - 0x1beec9, - 0x10210c, + 0x50b03, + 0x6a02202, + 0x6e92849, + 0x70988c9, + 0x98dcd, + 0x9910d, + 0x300dc2, + 0x503c4, + 0x1a2389, + 0xf9d4c, 0x2003c2, - 0x765c108, - 0x10b844, - 0x32b843, - 0x1c3448, - 0x8d304, - 0x1419002, + 0x76502c8, + 0x10a904, + 0x3295c3, + 0x1b9688, + 0x93244, + 0x1412f42, 0x14005c2, - 0x1419002, - 0x151fbc6, - 0x2405c3, - 0x2507c3, - 0x7e1a043, - 0x23e084, - 0x8626783, - 0x8e0f583, - 0x206502, - 0x25c204, - 0x214883, - 
0x2100c3, - 0x201802, - 0x24acc3, - 0x225542, - 0x313783, - 0x202fc2, - 0x201683, - 0x224e43, - 0x20d442, - 0x1c3448, - 0x82a2449, - 0x10210c, - 0x29083, - 0x2405c3, - 0x21ff08, - 0x8a100c3, - 0x201802, - 0x313783, - 0x202fc2, - 0x201683, - 0x224e43, - 0x20d442, - 0x3ae507, - 0x313783, - 0x202fc2, - 0x201683, - 0x224e43, - 0x20d442, - 0x21a043, - 0x2742, - 0x13cc3, - 0x1b42, - 0xa842, - 0x1c82, - 0x5942, - 0x3342, - 0x5502, - 0x20bec3, - 0x21a043, - 0x226783, - 0x20f583, - 0x25c204, - 0x20bc83, - 0x214e43, - 0x225a04, - 0x214883, - 0x24acc3, - 0x204d42, - 0x205443, - 0x1c3448, - 0x21a043, - 0x226783, - 0x20f583, - 0x214e43, - 0x214883, - 0x24acc3, - 0x1a42, - 0xc503, - 0x7982, - 0x20bec3, - 0x20b642, - 0x21a043, - 0x226783, - 0x20f583, - 0x25c204, - 0x214883, - 0x24acc3, - 0x33a505, - 0x22ccc2, + 0x1412f42, + 0x151e7c6, + 0x233cc3, + 0x276803, + 0x7e06643, + 0x232184, + 0x861f603, + 0x8e05503, + 0x203042, + 0x2503c4, + 0x2109c3, + 0x21bf83, + 0x201582, + 0x21f143, + 0x219142, + 0x310f03, + 0x204842, + 0x2019c3, + 0x21a743, + 0x2059c2, + 0x1b9688, + 0x829b1c9, + 0xf9d4c, + 0x22403, + 0x233cc3, + 0x3f2f08, + 0x8a1bf83, + 0x201582, + 0x310f03, + 0x204842, + 0x2019c3, + 0x21a743, + 0x2059c2, + 0x3a6387, + 0x310f03, + 0x204842, + 0x2019c3, + 0x21a743, + 0x2059c2, + 0x206643, + 0x8a42, + 0xf543, + 0x1342, + 0x4c02, + 0x6d602, + 0x2042, + 0x2642, + 0x13142, + 0x250b03, + 0x206643, + 0x21f603, + 0x205503, + 0x2503c4, + 0x211e43, + 0x206543, + 0x294744, + 0x2109c3, + 0x21f143, + 0x215c82, + 0x219683, + 0x1b9688, + 0x206643, + 0x21f603, + 0x205503, + 0x206543, + 0x2109c3, + 0x21f143, + 0x12482, + 0xab643, + 0x16282, + 0x250b03, + 0x202202, + 0x206643, + 0x21f603, + 0x205503, + 0x2503c4, + 0x2109c3, + 0x21f143, + 0x33d845, + 0x224b42, 0x2000c2, - 0x1c3448, - 0xaf12412, - 0xb2d9f88, - 0x10210c, - 0x1470708, - 0x16b8a, - 0x5985, - 0x53c7, - 0x20f583, - 0x201601, + 0x1b9688, + 0xae2a792, + 0xb3c2588, + 0xf9d4c, + 0x147e248, + 0x16d0a, + 0x2c45, + 0x1d54c7, + 0x205503, + 0x202701, 0x2009c1, - 0x202d01, - 0x201501, + 0x2026c1, + 0x202741, 0x200a41, - 0x22d881, + 0x226181, 0x200a01, - 0x205c41, - 0x24a501, + 0x232041, + 0x202781, 0x200001, 0x2000c1, 0x200201, - 0x14f985, - 0x1c3448, + 0x14cb05, + 0x1b9688, 0x200101, 0x200cc1, 0x200501, @@ -2372,7511 +2374,7524 @@ var nodes = [...]uint32{ 0x200ec1, 0x200581, 0x2003c1, - 0x2015c1, - 0x207a81, + 0x201401, + 0x207141, 0x200401, 0x200741, 0x2007c1, 0x200081, 0x201101, 0x200f81, - 0x202d81, - 0x203481, - 0x2033c1, - 0x21a043, - 0x226783, - 0x20f583, - 0x214883, - 0x24acc3, - 0x20b642, - 0x21a043, - 0x226783, + 0x208f81, + 0x205381, + 0x201841, + 0x206643, + 0x21f603, + 0x205503, + 0x2109c3, + 0x21f143, + 0x202202, + 0x206643, + 0x21f603, 0x2003c2, - 0x24acc3, - 0xf5d87, - 0x89207, - 0x38286, - 0x473ca, - 0x9e208, - 0x6bd08, - 0x6c747, - 0xc8b04, - 0x177f46, - 0xfaf05, - 0x1a4045, - 0xb8743, - 0x1b506, - 0x5f986, - 0x265a04, - 0x33e2c7, - 0x1c3448, - 0x2eef84, - 0x21a043, - 0x226783, - 0x20f583, - 0x214883, - 0x24acc3, - 0xb642, - 0x21a043, - 0x226783, - 0x20f583, - 0x214883, - 0x24acc3, - 0x339688, - 0x3496c4, - 0x241004, - 0x211d44, - 0x2dab87, - 0x2f1a07, - 0x21a043, - 0x24354b, - 0x3b43ca, - 0x3a9b07, - 0x3f11c8, - 0x21e908, - 0x226783, - 0x33ba07, - 0x204703, - 0x217b48, - 0x225d89, - 0x25c204, - 0x20bc83, - 0x297b48, - 0x214e43, - 0x2eeb8a, - 0x2f4006, - 0x3b52c7, - 0x214883, - 0x2fe146, - 0x3e4088, - 0x24acc3, - 0x25e906, - 0x30568d, - 0x3070c8, - 0x30be4b, - 0x30f646, - 0x20d847, - 0x223305, - 0x3e890a, - 0x365905, - 0x26ec0a, - 0x22ccc2, + 0x21f143, + 
0xf08c7, + 0x82b87, + 0x34106, + 0x3c80a, + 0x97d08, + 0x61f08, + 0x62947, + 0xc1e04, + 0x1ddf06, + 0xf4245, + 0x1cf805, + 0xaec43, + 0x15d46, + 0x54146, + 0x214f04, + 0x33a247, + 0x1b9688, + 0x2e4084, + 0x206643, + 0x21f603, + 0x205503, + 0x2109c3, + 0x21f143, + 0x2202, + 0x206643, + 0x21f603, + 0x205503, + 0x2109c3, + 0x21f143, + 0x335e08, + 0x3c7f44, + 0x235fc4, + 0x206204, + 0x2d4047, + 0x2ec587, + 0x206643, + 0x2392cb, + 0x3ab74a, + 0x388787, + 0x315b08, + 0x2afec8, + 0x21f603, + 0x373887, + 0x3d6403, + 0x203a48, + 0x20b289, + 0x2503c4, + 0x211e43, + 0x25ce88, + 0x206543, + 0x2ea58a, + 0x2eeac6, + 0x3ac647, + 0x2109c3, + 0x3becc6, + 0x2bea08, + 0x21f143, + 0x264506, + 0x303bcd, + 0x305608, + 0x30af0b, + 0x3121c6, + 0x33c047, + 0x217fc5, + 0x3dcfca, + 0x233d45, + 0x27538a, + 0x224b42, 0x200f83, - 0x260644, + 0x24cf44, 0x200006, - 0x3c3043, - 0x2b3c83, - 0x2332c3, - 0x23ac83, - 0x3ec3c3, - 0x201902, - 0x3a52c5, - 0x2bfbc9, - 0x204d03, - 0x24da43, - 0x205d83, - 0x20cb03, + 0x3b9283, + 0x2b4c83, + 0x38a7c3, + 0x23c0c3, + 0x3dd203, + 0x201c02, + 0x3a1085, + 0x2b83c9, + 0x215c43, + 0x2441c3, + 0x2028c3, + 0x213743, 0x200201, - 0x2ced07, - 0x2f2345, - 0x3ca803, - 0x262603, - 0x3f2583, - 0x211d44, - 0x33be83, - 0x214188, - 0x377ac3, - 0x31c8cd, - 0x2901c8, - 0x2200c6, - 0x304103, - 0x38bc43, - 0x3af583, - 0xde1a043, - 0x240908, - 0x243544, - 0x24a083, - 0x24e8c3, + 0x2e8e87, + 0x2d9c45, + 0x3c12c3, + 0x266483, + 0x3f1d43, + 0x206204, + 0x2fd643, + 0x2102c8, + 0x372703, + 0x31b70d, + 0x288708, + 0x3f30c6, + 0x2fbec3, + 0x385383, + 0x3a7403, + 0xde06643, + 0x234788, + 0x2392c4, + 0x240c03, + 0x2457c3, 0x200106, - 0x252c48, - 0x242ec3, - 0x220ac3, - 0x2c5383, - 0x224dc3, - 0x3e8943, - 0x21c0c3, - 0x226783, - 0x20ff83, - 0x25aa43, - 0x265883, - 0x2307c3, - 0x340403, - 0x213703, - 0x201d03, - 0x3ac745, - 0x2673c4, - 0x268507, - 0x2698c2, - 0x26a803, - 0x26ee86, - 0x270683, - 0x270a03, - 0x28e3c3, - 0x378803, - 0x221943, - 0x201c83, - 0x2ad347, - 0xea0f583, - 0x240243, - 0x20afc3, - 0x211983, - 0x231a43, - 0x21cb03, - 0x22b2c5, - 0x388583, - 0x24e349, + 0x249248, + 0x2023c3, + 0x21ce03, + 0x2be703, + 0x21a6c3, + 0x3dd003, + 0x20f343, + 0x21f603, + 0x216503, + 0x24e0c3, + 0x2534c3, + 0x228e03, + 0x33e443, + 0x39bac3, + 0x244bc3, + 0x3a5b45, + 0x25c484, + 0x25db07, + 0x2596c2, + 0x260983, + 0x265506, + 0x267a83, + 0x267d83, + 0x286d43, + 0x3de7c3, + 0x219b03, + 0x33b403, + 0x2a4e07, + 0xea05503, + 0x20ffc3, + 0x206a43, + 0x2036c3, + 0x20fc83, + 0x34ffc3, + 0x369d05, + 0x382b83, + 0x24dd09, 0x200c03, - 0x31ac43, - 0xee5b543, - 0x22bd83, - 0x203c83, - 0x219608, - 0x2bfb06, - 0x3785c6, - 0x2ca886, - 0x271607, - 0x2252c3, - 0x2384c3, - 0x214e43, - 0x29e306, - 0x20ffc2, - 0x2f33c3, - 0x344245, - 0x214883, - 0x32ad07, - 0x1608c43, - 0x2a1943, - 0x241a83, - 0x241503, - 0x247043, - 0x24acc3, - 0x366386, - 0x3bfdc6, - 0x389a83, - 0x3407c3, - 0x205443, - 0x21dcc3, - 0x31d4c3, - 0x310483, - 0x314143, - 0x212c85, - 0x2423c3, - 0x2423c6, - 0x21bd03, - 0x3c3688, - 0x236883, - 0x236889, - 0x263988, - 0x224888, - 0x22e2c5, - 0x23c04a, - 0x23d60a, - 0x24174b, - 0x245088, - 0x2ce083, - 0x216ec3, - 0x314183, - 0x2edd03, - 0x315888, - 0x334d83, - 0x3c8704, - 0x205e02, - 0x23fac3, - 0x265603, + 0x319643, + 0xee4f703, + 0x266543, + 0x206243, + 0x211a08, + 0x2b8306, + 0x3de586, + 0x2c3d46, + 0x268d87, + 0x213ac3, + 0x23ddc3, + 0x206543, + 0x297e06, + 0x2066c2, + 0x2eddc3, + 0x3423c5, + 0x2109c3, + 0x328fc7, + 0x161d783, + 0x23d5c3, + 0x236a03, + 0x2364c3, + 0x23c483, + 0x21f143, + 0x23e806, + 0x3a3286, + 0x383183, + 
0x3cbd03, + 0x219683, + 0x217cc3, + 0x31c303, + 0x30e243, + 0x3118c3, + 0x3c1f05, + 0x237343, + 0x35ddc6, + 0x20ef83, + 0x3b98c8, + 0x20c4c3, + 0x3b75c9, + 0x20c4c8, + 0x21a188, + 0x21e605, + 0x22f40a, + 0x2302ca, + 0x232acb, + 0x234448, + 0x325383, + 0x217043, + 0x311903, + 0x2f2983, + 0x313948, + 0x336c83, + 0x21f984, + 0x2021c2, + 0x240b83, + 0x260e43, 0x2007c3, - 0x3af603, - 0x283143, - 0x23ae43, - 0x22ccc2, - 0x2233c3, - 0x246143, - 0x31fd43, - 0x321d84, - 0x260644, - 0x2355c3, - 0x1c3448, - 0xe31e00c, - 0xe6ea545, - 0xdefc5, + 0x23d943, + 0x2976c3, + 0x236e83, + 0x224b42, + 0x218083, + 0x23ba43, + 0x31e943, + 0x321744, + 0x24cf44, + 0x224343, + 0x1b9688, + 0xe31d0cc, + 0xe658b05, + 0xde305, 0x2000c2, 0x200b02, - 0x201902, - 0x2029c2, + 0x201c02, + 0x206182, 0x200202, - 0x205202, - 0x244b02, - 0x201b42, + 0x2011c2, + 0x278d02, + 0x201342, 0x200382, - 0x203f02, - 0x215142, - 0x20d3c2, - 0x282182, - 0x207002, - 0x20ea82, - 0x2035c2, - 0x216402, - 0x205442, - 0x24bf42, - 0x209982, + 0x205e42, + 0x2c7f42, + 0x203c42, + 0x27d202, + 0x206702, + 0x2061c2, + 0x2020c2, + 0x201402, + 0x202b02, + 0x245342, + 0x203f42, 0x200682, - 0x217ac2, - 0x201a42, - 0x206382, + 0x2039c2, + 0x212482, + 0x202ec2, 0x201042, - 0x230802, - 0x2019c2, + 0x20e7c2, + 0x20c642, 0xc2, 0xb02, - 0x1902, - 0x29c2, + 0x1c02, + 0x6182, 0x202, - 0x5202, - 0x44b02, - 0x1b42, + 0x11c2, + 0x78d02, + 0x1342, 0x382, - 0x3f02, - 0x15142, - 0xd3c2, - 0x82182, - 0x7002, - 0xea82, - 0x35c2, - 0x16402, - 0x5442, - 0x4bf42, - 0x9982, + 0x5e42, + 0xc7f42, + 0x3c42, + 0x7d202, + 0x6702, + 0x61c2, + 0x20c2, + 0x1402, + 0x2b02, + 0x45342, + 0x3f42, 0x682, - 0x17ac2, - 0x1a42, - 0x6382, + 0x39c2, + 0x12482, + 0x2ec2, 0x1042, - 0x30802, - 0x19c2, - 0x21a043, - 0x226783, - 0x20f583, - 0x214883, - 0x24acc3, + 0xe7c2, + 0xc642, + 0x206643, + 0x21f603, + 0x205503, + 0x2109c3, + 0x21f143, 0xf82, - 0x21a043, - 0x226783, - 0x20f583, - 0x214883, - 0x24acc3, - 0xa0789, - 0xb642, - 0x20b642, - 0x24acc3, - 0x10a1a043, - 0x20f583, - 0x1b1749, - 0x214e43, - 0xf5d07, - 0x2445c2, - 0x1c3448, - 0x21a043, - 0x226783, - 0x20f583, - 0x16e83, - 0x214883, - 0x24acc3, - 0x79c2, - 0x2001c2, - 0x1427d45, - 0x14f985, - 0x213c82, - 0x1c3448, - 0xb642, - 0x242882, - 0x206442, - 0x10210c, - 0x20da42, - 0x231a82, - 0x218882, - 0x1a4045, - 0x200dc2, - 0x201802, - 0x217a02, - 0x208e02, - 0x2035c2, - 0x24a242, - 0x214942, - 0x2b60c2, - 0x11a7f604, - 0x142, - 0xf5d87, - 0x15943, - 0x1db18d, - 0xfaf89, - 0xf140b, - 0xfdd48, - 0x73ec9, - 0x11b446, - 0x20f583, - 0x1c3448, - 0x1bfd04, - 0x2d03, - 0x1bee85, - 0x1c3448, - 0x1ec6c7, - 0x12c5ea87, - 0x13269384, - 0x6d446, - 0x1beec9, - 0xbea8e, - 0x10210c, - 0x146087, - 0x15bc683, - 0x13602142, - 0x149f49, - 0x1ed5c4, - 0x2000c2, - 0x265a04, - 0x20b642, - 0x21a043, - 0x203142, - 0x226783, - 0x26dc3, - 0x200382, - 0x2eef84, - 0x20bc83, - 0x2593c2, - 0x214883, - 0x18882, - 0x2003c2, - 0x24acc3, - 0x219086, - 0x33d48f, - 0x733483, - 0x1c3448, - 0x20b642, - 0x204703, - 0x20f583, - 0x214e43, - 0x14bdabc7, - 0x14a7606, - 0x1f3d06, - 0xf2389, - 0x14fcd6c8, - 0x1e6304, - 0x152cab0a, - 0x163988, - 0x15c16a87, - 0xd9f88, - 0xbea88, - 0x15e86cb, - 0x147fb0a, - 0x16070943, - 0x103189, - 0x1650b188, - 0x16b8e487, - 0x1463d4a, - 0x1507c07, - 0xba74b, - 0x16e8a2cc, - 0x16bf05, - 0xdc4c5, - 0x123409, - 0x1dbf44, - 0x11d443, - 0x156cac45, - 0x12ee43, - 0x15a35ac3, - 0x12ee43, - 0x15942, - 0x21c2, - 0xba42, - 0xba42, - 0x3d82, - 0xba42, - 0x3342, - 0x45c2, - 0x101c2, - 0x14f985, - 0xf5d87, - 0x1e6304, - 0x109484, - 0x20b642, - 0x21a043, - 0x20f583, 
- 0x214883, - 0x2000c2, - 0x209582, - 0x20aa82, - 0x17e1a043, - 0x24a202, - 0x226783, - 0x200bc2, - 0x203942, - 0x20f583, - 0x216e82, - 0x261042, - 0x22fcc2, - 0x20ad02, - 0x2a4fc2, - 0x200802, - 0x204142, - 0x201742, - 0x21dd82, - 0x209bc2, - 0x4484c, - 0x2cd142, - 0x283382, - 0x22b242, - 0x202582, - 0x214e43, - 0x201942, - 0x214883, - 0x20b482, - 0x24fdc2, - 0x24acc3, - 0x24dac2, - 0x206382, - 0x2213c2, - 0x200e02, - 0x22c602, - 0x215982, - 0x209b02, - 0x25c442, - 0x22b282, - 0x33060a, - 0x375dca, - 0x3a8f8a, - 0x3f43c2, - 0x2039c2, - 0x22b982, - 0x182fe909, - 0x187d6f0a, - 0x154be47, - 0x18a00fc2, - 0x1446b83, - 0x2902, - 0x1d6f0a, - 0x16e3ce, - 0x2346c4, - 0x107245, - 0x1921a043, - 0x4b8c3, - 0x226783, - 0x2607c4, - 0x20f583, - 0x25c204, - 0x20bc83, - 0x145e89, - 0x51fc6, - 0x214e43, - 0x100d44, - 0x1443, - 0x214883, - 0x9bf05, - 0x208c43, - 0x24acc3, - 0x1440704, - 0x2423c3, - 0x195547c4, - 0xd2a48, - 0x200f83, - 0x1c3448, - 0x6502, - 0x1527143, - 0x1333c6, - 0x1577ec4, - 0x4c85, - 0x1dbd4a, - 0x138682, - 0x1a00578d, - 0x1bdcc6, - 0x152051, - 0x1a6fe909, - 0x15d4ca, - 0x4d08, - 0x167d48, - 0x152e8e, - 0x89e53, - 0x210a78c7, - 0x1d42, - 0x148210, - 0x152c8c, - 0xfea94, - 0x1b87c7, - 0x24c0e, - 0x14f98b, - 0x1519cb, - 0x1c740a, - 0x133907, - 0x1c3448, - 0xb3d88, - 0xf747, - 0x21422bcb, - 0x23e46, - 0x26647, - 0x3ac2, - 0xef7cd, - 0x13e705, - 0x1d647, - 0x133c4a, - 0x1402cc, - 0x14048f, - 0xae74f, - 0x16e3c2, - 0xb642, - 0x9a148, - 0x21903e8c, - 0x1b204a, - 0x21f656ca, - 0xf638a, - 0x86cca, - 0x8ffc8, - 0x2d785, - 0x728c8, - 0xf6848, - 0x1ec688, - 0x153648, - 0x101c2, - 0xae4cf, - 0x1427dcd, - 0x6d8b, - 0xd5448, - 0x42b87, - 0x5940a, - 0x3e7cb, - 0xa9cc9, - 0x59307, - 0xf8506, - 0x2d688, - 0x3d7cc, - 0x1e9987, - 0x244ca, - 0xa3c8, - 0x11780e, - 0x12078e, - 0x13374b, - 0x16600b, - 0x3828b, - 0x3d309, - 0x4318b, - 0x4864d, - 0x4b94b, - 0x4ca4d, - 0x4cdcd, - 0x5a30a, - 0x6048b, - 0x56a0b, - 0x5a045, - 0x221cd710, - 0x80392, - 0x33011, - 0x176752, - 0x35f8f, - 0x7f7cf, - 0x15118d, - 0x85250, - 0xa842, - 0x22620948, - 0x1e9808, - 0x64f50, - 0x12c88e, - 0x22b77a85, - 0x5e68b, - 0x144f90, - 0xaaecb, - 0x1bc3cc, - 0x729ca, - 0x1661c9, - 0x735c8, - 0x78887, - 0x78bc7, - 0x78d87, - 0x79cc7, - 0x7b747, - 0x7bc07, - 0x7c307, - 0x7c747, - 0x7d207, - 0x7d587, - 0x7da47, - 0x7dc07, - 0x7ddc7, - 0x7df87, - 0x7e307, - 0x7e687, - 0x7ff87, - 0x80807, - 0x812c7, - 0x81587, - 0x81747, - 0x81a47, - 0x82047, - 0x82247, - 0x82bc7, - 0x82d87, - 0x82f47, - 0x83207, - 0x83847, - 0x83fc7, - 0x86b07, - 0x86f47, - 0x87747, - 0x87907, - 0x87f47, - 0x882c7, - 0x88c87, - 0x89087, - 0x893c7, - 0x89587, - 0x899c7, - 0x8a5c7, - 0x8b707, - 0x8bcc7, - 0x8be87, - 0x8c307, - 0x8cc47, - 0xef42, - 0xf694a, - 0x1b308, - 0x1c41cc, - 0x124c87, - 0x58f85, - 0x61591, - 0x7286, - 0x137bca, - 0x99fca, - 0x6d446, - 0x3f78b, - 0x642, - 0x3c691, - 0x131809, - 0xab8c9, - 0xace06, - 0x1742, - 0x70d0a, - 0xbf0c9, - 0xbf80f, - 0xbfe0e, - 0xc1bc8, - 0x22f53092, - 0x18208, - 0x232737c7, - 0xc468f, - 0x7142, - 0x178409, - 0x10044a, - 0x2361a389, - 0xdaec9, - 0xdaecc, - 0x19a4b, - 0x5824e, - 0x11c0c, - 0x102e8f, - 0x1c980e, - 0x50fcc, - 0x64d49, - 0x71a11, - 0x88448, - 0x92f12, - 0x94d8d, - 0x993cd, - 0x9ee4b, - 0x167f15, - 0x191549, - 0x1b6d0a, - 0x1df109, - 0xa27d0, - 0xa914b, - 0xb1c8f, - 0xb928b, - 0xc710c, - 0xc77d0, - 0xdfdca, - 0xf884d, - 0x12b38e, - 0x19f6ca, - 0xc8bcc, - 0xcf7d4, - 0x131491, - 0x1c470b, - 0xd82cf, - 0xd930d, - 0xda3ce, - 0xddf0c, - 0xde64c, - 0xdfacb, - 0xe1c0e, - 0xe4490, - 0xe534b, - 0xf740d, - 0x10fd8f, - 0x10eacc, - 
0x11704e, - 0x185451, - 0x12ae8c, - 0x169d07, - 0x16bb8d, - 0x17600c, - 0x180f50, - 0x19b74d, - 0x19c3c7, - 0x1a5c90, - 0x1b3048, - 0xc814b, - 0xca18f, - 0x1c4448, - 0x57b8d, - 0x119c50, - 0x182fc9, - 0x23bcd6c8, - 0x23ece206, - 0xcf503, - 0x1ade89, - 0xade49, - 0xd4345, - 0xaf02, - 0x59889, - 0x6de0a, - 0x242933c6, - 0x14933cd, - 0x24706444, - 0x1e4986, - 0x28b4a, - 0x2af0d, - 0x24ae5b8b, - 0x1e9e08, - 0x24c65589, - 0x24343, - 0x17c7ca, - 0xf53d1, - 0xf5809, - 0xf6307, - 0xf7148, - 0xf8b87, - 0x736c8, - 0xae5cb, - 0x13b509, - 0x101590, - 0x101a4c, - 0x101ec9, - 0x10210c, - 0x2530250d, - 0x1037c8, - 0x103cc5, - 0x1d0fc8, - 0x1a4f4a, - 0x1b8ec7, - 0x25c2, - 0x25679555, - 0x145c8a, - 0x13e549, - 0x1ef4c8, - 0xae009, - 0x9605, - 0x12ab8a, - 0x1a07, - 0xa050f, - 0x16bf8b, - 0x1ec90c, - 0x30712, - 0xc0906, - 0x1580588, - 0x8e5c5, - 0x11d688, - 0x1e4acb, - 0xe6ad1, - 0x179d47, - 0x6e40a, - 0x18784c, - 0x25b0b045, - 0x1b72cc, - 0x25d129ce, - 0x142743, - 0x19ec86, - 0x4a242, - 0x11468b, - 0x11564a, - 0x151640c, - 0x1e9d08, - 0x4cc08, - 0x263ef546, - 0x17ef87, - 0x23f8e, - 0x1534c7, - 0x3b02, - 0x2fc2, - 0x188850, - 0x77e47, - 0x77f4f, - 0x1b506, - 0xa8b4e, - 0xb424b, - 0x5e0c8, - 0xaa089, - 0x150652, - 0x11dccd, - 0x11e848, - 0xf12c9, - 0x1a718d, - 0xe889, - 0x782cb, - 0x7ae88, - 0x7e488, - 0x80f88, - 0x81bc9, - 0x81dca, - 0x8254c, - 0x2398a, - 0xe094a, - 0x11d507, - 0x43a8a, - 0xf90c8, - 0x1d3e0d, - 0x62ad1, - 0x266da6c6, - 0x16d20b, - 0x3b48c, - 0xff48, - 0x1dc8c9, - 0x16548d, - 0x79f10, - 0x150dcc, - 0x15450d, - 0x10384f, - 0xba42, - 0x1df30d, - 0x3342, - 0x96c2, - 0x11d44a, - 0x26bd0d0a, - 0x32e4a, - 0x26e8bfc8, - 0x137aca, - 0x12460b, - 0x126c47, - 0x1b41cc, - 0x120a0c, - 0x12940a, - 0x2712968f, - 0x129a4c, - 0x129d47, - 0x12b70e, - 0x275f4285, - 0x1eab88, - 0x79c2, - 0x1424dc3, - 0x1aa0494e, - 0x1b2029ce, - 0x1bb2658a, - 0x1c33718e, - 0x1ca082ce, - 0x1d353c8c, - 0x154be47, - 0x155d589, - 0x1446b83, - 0x1db5ce8c, - 0x1e20a849, - 0x1ebd7e09, - 0x1f3dbb49, - 0x2902, - 0x4891, - 0x2911, - 0x1264cd, - 0x1370d1, - 0x1d4fd1, - 0x153bcf, - 0x15cdcf, - 0x1cc20c, - 0x1d7d4c, - 0x1dba8c, - 0xec7cd, - 0xfd515, - 0x13c14c, - 0x13e80c, - 0x179ed0, - 0x1c5acc, - 0x1c8a8c, - 0x1d17d9, - 0x1d9759, - 0x8019, - 0xa594, - 0xf8d4, - 0x107d4, - 0x10d54, - 0x1a994, - 0x1fa0fb89, - 0x20010a89, - 0x20b3e8c9, - 0x1ae88649, - 0x2902, - 0x1b688649, - 0x2902, - 0x800a, - 0x2902, - 0x1be88649, - 0x2902, - 0x800a, - 0x2902, - 0x1c688649, - 0x2902, - 0x1ce88649, - 0x2902, - 0x1d688649, - 0x2902, - 0x800a, - 0x2902, - 0x1de88649, - 0x2902, - 0x800a, - 0x2902, - 0x1e688649, - 0x2902, - 0x1ee88649, - 0x2902, - 0x800a, - 0x2902, - 0x1f688649, - 0x2902, - 0x800a, - 0x2902, - 0x1fe88649, - 0x2902, - 0x20688649, - 0x2902, - 0x20e88649, - 0x2902, - 0x800a, - 0x2902, - 0x1400401, - 0x152045, - 0x1c7404, - 0x1412f43, - 0x15f2603, - 0x141d983, - 0x94ec4, - 0x13b848, - 0x494e, - 0x29ce, - 0x9280e, - 0x12658a, - 0x13718e, - 0x82ce, - 0x153c8c, - 0x15ce8c, - 0xa849, - 0x1d7e09, - 0x1dbb49, - 0xfb89, - 0x10a89, - 0x13e8c9, - 0xfd70d, - 0x11009, - 0x1ac49, - 0x150a84, - 0x1720c4, - 0x199884, - 0x1cacc4, - 0xbaa04, - 0x1b9944, - 0x1e6a84, - 0x69004, - 0x1b404, - 0x642c4, - 0x334c9, - 0x334cc, - 0x159186, - 0x15918e, - 0x94ec4, - 0x159bf43, - 0x7e87, - 0x149440c, - 0x4d03, - 0x642c4, - 0xa842, - 0x51887, - 0x103e88, - 0x191dc8, - 0x4fd44, - 0x1cb8c6, - 0x54d47, - 0xe8b44, - 0x11c0c6, - 0x216c2, - 0x2d81, - 0x1d3a44, - 0x89cc6, - 0x22603, - 0xa842, - 0x4d03, - 0xe0143, - 0x30503, - 0x14743, - 0x117783, - 0x30705, - 0x83382, - 0x151682, - 
0x1bf708, - 0xfa807, - 0x15743, - 0x13b587, - 0x101c2, - 0xf2389, - 0x2000c2, - 0x20b642, - 0x203142, - 0x21b5c2, - 0x200382, - 0x2003c2, - 0x202fc2, - 0x21a043, - 0x226783, - 0x20f583, - 0x231a43, - 0x214883, - 0x24acc3, - 0x1c3448, - 0x21a043, - 0x226783, - 0x214883, - 0x24acc3, - 0x13b83, - 0x20f583, - 0x5c204, - 0x2000c2, - 0x20bec3, - 0x29a1a043, - 0x3acb47, - 0x20f583, - 0x222043, - 0x225a04, - 0x214883, - 0x24acc3, - 0x228d8a, - 0x219085, - 0x205443, - 0x249902, - 0x1c3448, - 0x29eea54a, - 0xc01, - 0x1c3448, - 0xb642, - 0x140242, - 0x2a66530b, - 0x2aa1c1c4, - 0x106385, - 0x1405985, - 0x103e86, - 0x2ae05985, - 0x6a043, - 0xa7343, - 0x1bfd04, - 0x2d03, - 0x1bee85, - 0x14f985, - 0x1c3448, - 0x26647, - 0x1a043, - 0x38d4d, - 0x2b647207, - 0xcc6, - 0x2b953ac5, - 0x1b7892, - 0xd87, - 0x2f38a, - 0x2cf48, - 0x2f287, - 0x170aca, - 0x1becc8, - 0x7ac47, - 0x15ef8f, - 0x574c7, - 0x68e06, - 0x144f90, - 0x148e0c6, - 0x5058f, - 0x1bc09, - 0x1e4a04, - 0x2bc00e4e, - 0x51bc9, - 0x7e0c6, - 0xeb09, - 0x119186, - 0x9dc6, - 0xc3acc, - 0x3e9ca, - 0xa9e47, - 0x115f4a, - 0x1249, - 0x10514c, - 0x2b74a, - 0x5b00a, - 0x1beec9, - 0x1e4986, - 0xa9f0a, - 0x11edca, - 0xb6b4a, - 0x16f909, - 0xf4dc8, - 0xf5046, - 0xfb54d, - 0x10210c, - 0x6700b, - 0xd4905, - 0x2c38a7cc, - 0x146087, - 0x1e3989, - 0xdea07, - 0xbe494, - 0x11a04b, - 0xd528a, - 0x1504ca, - 0xbc64d, - 0x1524809, - 0x11da8c, - 0x11e64b, - 0x16a157, - 0x16ac95, - 0x3983, - 0x3983, - 0x38286, - 0x3983, - 0x103e88, - 0x15ee43, - 0x53544, - 0x1f184, - 0x1f18c, - 0x6a303, - 0x14b6c07, - 0xf79cd, - 0xaa045, - 0x151e183, - 0x1536ac8, - 0x67409, - 0x1b1749, - 0x30705, - 0x1e4acb, - 0x1c49cb, - 0x150ed43, - 0x150ed48, - 0x152986, - 0x145a4c7, - 0xa2a47, - 0x2d0a7789, - 0x14746, - 0xbec3, - 0x1c3448, - 0xb642, - 0x607c4, - 0x10210c, - 0x31d03, - 0x13a505, - 0x21a043, - 0x226783, - 0x20f583, - 0x214883, - 0x24acc3, - 0x205d83, - 0x21a043, - 0x226783, - 0x204703, - 0x20f583, - 0x214e43, - 0x214883, - 0x24acc3, - 0x319283, - 0x200f83, - 0x205d83, - 0x265a04, - 0x21a043, - 0x226783, - 0x20f583, - 0x214883, - 0x24acc3, - 0x211b43, - 0x212f03, - 0x249902, - 0x2e8a7885, - 0x1437143, - 0x21a043, - 0x226783, - 0x226dc3, - 0x204703, - 0x20f583, - 0x25c204, - 0x20de83, - 0x2384c3, - 0x214e43, - 0x214883, - 0x24acc3, - 0x205443, - 0x2f629e03, - 0x30609, - 0xb642, - 0x3f3903, - 0x3021a043, - 0x226783, - 0x259f43, - 0x20f583, - 0x224b03, - 0x2384c3, - 0x24acc3, - 0x205fc3, - 0x3c80c4, - 0x1c3448, - 0x30a1a043, - 0x226783, - 0x2c1c83, - 0x20f583, - 0x214e43, - 0x225a04, - 0x214883, - 0x24acc3, - 0x227603, - 0x1c3448, - 0x3121a043, - 0x226783, - 0x204703, - 0x10210c, - 0x208c43, - 0x24acc3, - 0x1c3448, - 0x154be47, - 0x20bec3, - 0x21a043, - 0x226783, - 0x20f583, - 0x25c204, - 0x225a04, - 0x214883, - 0x24acc3, - 0x14f985, - 0xf5d87, - 0xbe6cb, - 0x32241546, - 0xf5c04, - 0xd4905, - 0x1470708, - 0x2facd, - 0x1cd6c8, - 0x32a407c5, - 0x2af84, - 0xb642, - 0x18b43, - 0x159085, - 0x445c2, - 0x35cc45, - 0x1c3448, - 0x343341cd, - 0x3460510a, - 0x3982, - 0x20883, - 0x10210c, - 0x17290f, - 0x1b5c2, - 0x94ec4, - 0x642c4, - 0xb642, - 0x2000c2, - 0x20bec3, - 0x21a043, - 0x20f583, - 0x25c204, - 0x214e43, - 0x225a04, - 0x214883, - 0x24acc3, - 0x205443, - 0x21a043, - 0x226783, - 0x214883, - 0x24acc3, - 0x12c85, - 0x339e48, - 0x265a04, - 0x3d4506, - 0x3d7c06, - 0x1c3448, - 0x320543, - 0x23ab49, - 0x318455, - 0x11845f, - 0x21a043, - 0x9ebc7, - 0x221b92, - 0x18dc86, - 0x190485, - 0x729ca, - 0x1661c9, - 0x22194f, - 0xf5207, - 0x2eef84, - 0x225105, - 0x31aa10, - 0x284ac7, - 0x10210c, - 0x208c43, - 
0x2a1948, - 0x64146, - 0x2937ca, - 0x2311c4, - 0x30ad83, - 0x249902, - 0x30614b, - 0x1c3203, - 0x226783, - 0x20f583, - 0x197184, - 0x21a043, - 0x226783, - 0x20f583, - 0x214e43, - 0x214883, - 0x24acc3, - 0x310783, - 0x20b642, - 0x3cc83, - 0x100b04, - 0x214883, - 0x24acc3, - 0x36846ac5, - 0x1dc686, - 0x21a043, - 0x226783, - 0x20f583, - 0x214e43, - 0x24acc3, - 0x21a043, - 0x226783, - 0x20f583, - 0x222043, - 0x222383, - 0x24acc3, - 0xbec3, - 0x20b642, - 0x21a043, - 0x226783, - 0x214883, - 0x24acc3, - 0x26c02, - 0x2000c2, - 0x21a043, - 0x226783, - 0x20f583, - 0x214883, - 0x24acc3, - 0x5985, - 0x6a0c9, - 0x4d03, - 0x265a04, - 0x21a043, - 0x226783, - 0x230944, - 0x214883, - 0x24acc3, - 0x1c3448, - 0x21a043, - 0x226783, - 0x20f583, - 0x214883, - 0x24acc3, - 0x13bf09, - 0x11d44, - 0x21a043, - 0x101c2, - 0x226783, - 0x204703, - 0x211983, - 0x214e43, - 0x214883, - 0x24acc3, - 0x19c2, - 0x21a043, - 0x226783, - 0x20f583, - 0x3b9544, - 0x25c204, - 0x214883, - 0x24acc3, - 0x200f83, - 0x2742, - 0x20b642, - 0x21a043, - 0x226783, - 0x20f583, - 0x214883, - 0x24acc3, - 0x161243, - 0x1c3448, - 0x21a043, - 0x226783, - 0x20f583, - 0x37ee83, - 0xf2c3, - 0x22043, - 0x214883, - 0x24acc3, - 0x3a086, - 0x33060a, - 0x34e149, - 0x369a8b, - 0x369eca, - 0x375dca, - 0x386bcb, - 0x39b20a, - 0x3a280a, - 0x3a8f8a, - 0x3a920b, - 0x3ce989, - 0x3de04a, - 0x3de88b, - 0x3ede8b, - 0x3f2fca, - 0x6282, - 0x21a043, - 0x226783, - 0x204703, - 0x214e43, - 0x214883, - 0x24acc3, - 0x610b, - 0x12c887, - 0x73b48, - 0x1a72c4, - 0x1e6304, - 0xa2388, - 0xf9b06, - 0xae706, - 0x23b87, - 0x12a947, - 0x3589, - 0x1c3448, - 0x21a043, - 0x727c4, - 0x278884, - 0x204e42, - 0x225a04, - 0x252f05, - 0x205d83, - 0x265a04, - 0x21a043, - 0x243544, - 0x226783, - 0x2607c4, - 0x2eef84, - 0x25c204, - 0x2384c3, - 0x214883, - 0x24acc3, - 0x2cf905, - 0x211b43, - 0x205443, - 0x236d03, - 0x223984, - 0x333344, - 0x246c85, - 0x1c3448, - 0x3ebc44, - 0x209f46, - 0x3bf004, - 0x20b642, - 0x230b07, - 0x256387, - 0x25d444, - 0x2fa885, - 0x39b945, - 0x239805, - 0x25c204, - 0x2716c8, - 0x2680c6, - 0x32f248, - 0x2f8245, - 0x2fc3c5, - 0x27a004, - 0x24acc3, - 0x30b844, - 0x385bc6, - 0x219183, - 0x223984, - 0x26ed05, - 0x259804, - 0x2b7944, - 0x249902, - 0x3a1ec6, - 0x3c1786, - 0x31be45, - 0x2000c2, - 0x20bec3, - 0xf9c86, - 0x3be0b642, - 0x220ac4, - 0x1981c4, - 0x6f005, - 0x200382, - 0x214e43, - 0x2736c2, - 0x214883, - 0x2003c2, - 0x308746, - 0x219303, - 0x1e4905, - 0x200f83, - 0x1c3448, - 0x1c3448, - 0x20f583, - 0x10210c, - 0x2000c2, - 0x3ca0b642, - 0x20f583, - 0x27dd43, - 0x20de83, - 0x21c1c4, - 0x214883, - 0x24acc3, - 0x1c3448, - 0xc0847, - 0x2000c2, - 0x3d20b642, - 0x21a043, - 0x214883, - 0x24acc3, - 0x682, - 0x20ea02, - 0x22ccc2, - 0x222043, - 0x305103, - 0x2000c2, - 0x14f985, - 0x1c3448, - 0xf5d87, - 0x20b642, - 0x226783, - 0x2607c4, - 0x207783, - 0x20f583, - 0x211983, - 0x214e43, - 0x214883, - 0x21c243, - 0x24acc3, - 0x2190c3, - 0xd2a48, - 0xf83, - 0x147593, - 0x14b954, - 0x14f985, - 0xf5d87, - 0x2f389, - 0x11c806, - 0x16f34b, - 0x38286, - 0x6bb47, - 0x1bc006, - 0x649, - 0x16278a, - 0x9e0cd, - 0x1dae8c, - 0x11f74a, - 0xab348, - 0x1a4045, - 0x2f3c8, - 0x1b506, - 0x1d9206, - 0x5f986, - 0x20a842, - 0x178884, - 0xe0146, - 0x14e754e, - 0x5086, - 0x7a60c, - 0x3e6a760b, - 0x14f985, - 0x15278b, - 0x3eb67b87, - 0x3ef67b8a, - 0x3f3d9144, - 0x88c9, - 0xfdc8, - 0x1c75c7, - 0x2de11, - 0x130dca, - 0x21a043, - 0x3f694308, - 0x170a45, - 0x1a0e88, - 0x36604, - 0x6e005, - 0xb8647, - 0x3f9d6d46, - 0xe044b, - 0x3ffc7cc9, - 0x181c5, - 0x61586, - 0x168e86, - 0xa528a, - 0xd184c, - 0x1ca843, - 
0x1e6304, - 0x403d2484, - 0x67409, - 0x113107, - 0x10070a, - 0x14ea449, - 0x605, - 0x120703, - 0x406420c7, - 0x9bf05, - 0x1573546, - 0x14638c6, - 0x3f84c, - 0x10d308, - 0x40930fc5, - 0x40c4a243, - 0x113844, - 0x1d78b, - 0x14b0cb, - 0x4125ac0c, - 0x142d843, - 0xd61c8, - 0x1c49cb, - 0xb8509, - 0xd5603, - 0x124908, - 0x1428a86, - 0x9e747, - 0x41765489, - 0x42af0c48, - 0xbaf47, - 0xdc4ca, - 0x42f6a688, - 0x11e30d, - 0x1cc989, - 0x112c8, - 0x4d03, - 0x1486089, - 0x642c4, - 0x187d45, - 0x47143, - 0x38286, - 0x103e88, - 0x6502, - 0x1b404, - 0xaa345, - 0x1addc4, - 0x1437dc3, - 0x24dc7, - 0x41a24dc3, - 0x41fb8246, - 0x42244e04, - 0x42601b07, - 0x103e84, - 0x17ef87, - 0x103e84, - 0x17ef87, - 0x103e84, - 0x103e84, - 0x17ef87, - 0x5989, - 0x41, - 0x21a043, - 0x226783, - 0x20f583, - 0x214e43, - 0x214883, - 0x24acc3, - 0x2000c2, - 0x20b642, - 0x20f583, - 0x206502, - 0x214883, - 0x24acc3, - 0x219303, - 0x38ef8f, - 0x38f34e, - 0x1c3448, - 0x21a043, - 0x52a87, - 0x226783, - 0x20f583, - 0x20bc83, - 0x214883, - 0x24acc3, - 0x4fc4, - 0x2e44, - 0xa04, - 0x203a83, - 0x203a87, - 0x203f42, - 0x27ee89, - 0x200b02, - 0x388b4b, - 0x2b154a, - 0x2b7d09, - 0x200542, - 0x2369c6, - 0x25ed95, - 0x388c95, - 0x261ad3, - 0x389213, - 0x204302, - 0x22bb45, - 0x3ea8cc, - 0x287c8b, - 0x269e45, - 0x2029c2, - 0x314102, - 0x377546, - 0x201d42, - 0x29ca06, - 0x36640d, - 0x3a3dcc, - 0x3cd784, - 0x200882, - 0x214202, - 0x2932c8, - 0x200202, - 0x202f06, - 0x3a480f, - 0x202f10, - 0x2ff204, - 0x25ef55, - 0x261c53, - 0x217143, - 0x35c78a, - 0x392b47, - 0x398a89, - 0x315cc7, - 0x21da82, - 0x200282, - 0x3d2386, - 0x20b102, - 0x1c3448, - 0x211f02, - 0x202642, - 0x2121c7, - 0x398e07, - 0x398e11, - 0x225505, - 0x22550e, - 0x225fcf, - 0x203ac2, - 0x227547, - 0x227708, - 0x201002, - 0x229b02, - 0x2143c6, - 0x2143cf, - 0x27b1d0, - 0x236f02, - 0x203d82, - 0x230c48, - 0x203d83, - 0x29b148, - 0x247d4d, - 0x2063c3, - 0x3d7a48, - 0x26c00f, - 0x26c3ce, - 0x35018a, - 0x2f4111, - 0x2f4590, - 0x3b0b8d, - 0x3b0ecc, - 0x3f3787, - 0x35c907, - 0x3d45c9, - 0x216f42, - 0x205202, - 0x26d8cc, - 0x26dbcb, - 0x2045c2, - 0x2dfc86, - 0x212f82, - 0x200482, - 0x36e3c2, - 0x20b642, - 0x239244, - 0x246887, - 0x203682, - 0x24d507, - 0x24f347, - 0x215942, - 0x218402, - 0x252945, - 0x203402, - 0x2d0bce, - 0x215b8d, - 0x226783, - 0x25880e, - 0x231e8d, - 0x34cc83, - 0x2017c2, - 0x2990c4, - 0x211542, - 0x22e042, - 0x3add05, - 0x3b3c47, - 0x257682, - 0x21b5c2, - 0x25f487, - 0x267788, - 0x2698c2, - 0x28e646, - 0x26d74c, - 0x26da8b, - 0x205b02, - 0x274b0f, - 0x274ed0, - 0x2752cf, - 0x275695, - 0x275bd4, - 0x2760ce, - 0x27644e, - 0x2767cf, - 0x276b8e, - 0x276f14, - 0x277413, - 0x2778cd, - 0x28ce09, - 0x2a0083, - 0x206582, - 0x364805, - 0x207786, - 0x200382, - 0x2e4f47, - 0x20f583, - 0x200642, - 0x38bcc8, - 0x2f4351, - 0x2f4790, - 0x204182, - 0x29f347, - 0x2040c2, - 0x271107, - 0x20af02, - 0x254e89, - 0x377507, - 0x29f808, - 0x3d6b86, - 0x305003, - 0x39e645, - 0x20b602, - 0x2004c2, - 0x34cb85, - 0x370845, - 0x209a42, - 0x237743, - 0x352b07, - 0x3d9487, - 0x203c82, - 0x39ac04, - 0x20a243, - 0x3f3b89, - 0x20a248, - 0x212a82, - 0x20fe02, - 0x22e807, - 0x3584c5, - 0x285788, - 0x286547, - 0x20ddc3, - 0x2dffc6, - 0x3b0a0d, - 0x3b0d8c, - 0x3a1086, - 0x206442, - 0x205942, - 0x200ec2, - 0x26be8f, - 0x26c28e, - 0x39b9c7, - 0x2024c2, - 0x214e45, - 0x214e46, - 0x22ab02, - 0x201942, - 0x2a0d06, - 0x24fb03, - 0x3d1206, - 0x2e8b05, - 0x2e8b0d, - 0x2e9095, - 0x2e990c, - 0x2e9c8d, - 0x2e9fd2, - 0x20d3c2, - 0x282182, - 0x10210c, + 0x206643, + 0x21f603, + 0x205503, + 0x2109c3, + 0x21f143, + 0x99b49, 
+ 0x2202, 0x202202, - 0x379cc6, - 0x211ac6, - 0x4429eb44, - 0x2025c2, - 0x207806, - 0x217a02, - 0x254405, - 0x203cc2, - 0x215c89, - 0x2161cc, - 0x21650b, - 0x2003c2, - 0x268908, - 0x201982, - 0x207002, - 0x287a46, - 0x2885c5, - 0x394447, - 0x358845, - 0x27a8c5, - 0x204d82, - 0x22f482, - 0x2035c2, - 0x2b1f87, - 0x30880d, - 0x308b8c, - 0x2517c7, - 0x28e5c2, - 0x216402, - 0x379688, - 0x259a08, - 0x327448, - 0x3c4404, - 0x2e1e87, - 0x3064c3, - 0x209ec2, - 0x211b42, - 0x309589, - 0x317c07, - 0x205442, - 0x288085, - 0x221e42, - 0x223c42, - 0x30f203, - 0x30f206, - 0x310482, - 0x313702, - 0x200402, - 0x234586, - 0x357f07, - 0x21d602, - 0x200902, - 0x29af8f, - 0x25864d, - 0x2db38e, - 0x231d0c, - 0x20c802, - 0x2026c2, - 0x3d69c5, - 0x32eb06, - 0x201d82, - 0x209982, - 0x200682, - 0x232004, - 0x354204, - 0x360a46, - 0x202fc2, - 0x29bdc7, - 0x234683, - 0x249088, - 0x249a48, - 0x250e47, - 0x38e646, - 0x201a82, - 0x232603, - 0x232607, - 0x2875c6, - 0x2d6385, - 0x257ec8, - 0x2075c2, - 0x2fc8c7, - 0x230802, - 0x2a7382, - 0x206342, - 0x2030c9, - 0x209202, - 0x111908, - 0x201682, - 0x2b0b43, - 0x336e87, - 0x201b02, - 0x21634c, - 0x21664b, - 0x3a1106, - 0x30f905, - 0x4462b803, - 0x2021c2, - 0x2019c2, - 0x2d7cc6, - 0x234b83, - 0x3b92c7, - 0x20ea42, - 0x2008c2, - 0x25ec15, - 0x388e55, - 0x261993, - 0x389393, - 0x279a47, - 0x28b891, - 0x295090, - 0x29a312, - 0x29d851, - 0x2a1cc8, - 0x2a1cd0, - 0x2a5d0f, - 0x2b1313, - 0x2b7ad2, - 0x2c3490, - 0x376b8f, - 0x39c552, - 0x2c83d1, - 0x2cb893, - 0x2ce352, - 0x2e874f, - 0x2eb08e, - 0x2efed2, - 0x2f2f91, - 0x2f384f, - 0x2f6dce, - 0x2f96d1, - 0x2fbd50, - 0x3047d2, - 0x307e91, - 0x30ca90, - 0x30d4cf, - 0x310811, - 0x314290, - 0x314d46, - 0x327a47, - 0x2284c7, - 0x2027c2, - 0x296485, - 0x3e0bc7, - 0x22ccc2, - 0x205a42, - 0x3d0f05, - 0x20c703, - 0x378306, - 0x3089cd, - 0x308d0c, - 0x207882, - 0x3ea74b, - 0x287b4a, - 0x28a70a, - 0x22ba49, - 0x2d6e8b, - 0x30798d, - 0x28668c, - 0x31ae8a, - 0x25018c, - 0x25360b, - 0x269c8c, - 0x28414e, - 0x28c48b, - 0x2acb0c, - 0x2ccf43, - 0x37ef06, - 0x36a682, - 0x227e42, - 0x26fd83, - 0x201102, - 0x220983, - 0x2da0c6, - 0x275847, - 0x2dd0c6, - 0x3b2208, - 0x352988, - 0x32cb46, - 0x20b382, - 0x31b80d, - 0x31bb4c, - 0x34b287, - 0x31f587, - 0x226402, - 0x2216c2, - 0x209142, - 0x292602, - 0x33f716, - 0x344395, - 0x347056, - 0x34d093, - 0x34d752, - 0x35e9d3, - 0x35f512, - 0x3c048f, - 0x3cfb98, - 0x3d1e17, - 0x3d5419, - 0x3d7198, - 0x3d8058, - 0x3d8bd7, - 0x3d9d97, - 0x3dc056, - 0x3e0713, - 0x3e0d95, - 0x3e17d2, - 0x3e1c53, - 0x16f02, - 0x44a03584, - 0x44fcd6c8, - 0x5985, - 0x20b642, - 0x214883, - 0x445c2, - 0x24acc3, - 0x21a043, - 0x226783, - 0x20f583, - 0x214e43, - 0x225a04, - 0x214883, - 0x24acc3, - 0x219303, + 0x21f143, + 0x10a06643, + 0x205503, + 0xdfa89, + 0x206543, + 0xf0847, + 0x2232c2, + 0x1b9688, + 0x206643, + 0x21f603, + 0x205503, + 0x17003, + 0x2109c3, + 0x21f143, + 0x3642, + 0x2001c2, + 0x1421805, + 0x14cb05, + 0x2886c2, + 0x1b9688, + 0x2202, + 0x237e02, + 0x202f82, + 0xf9d4c, + 0x2104c2, + 0x20fcc2, + 0x212282, + 0x1cf805, + 0x200dc2, + 0x201582, + 0x203902, + 0x201542, + 0x2020c2, + 0x2413c2, + 0x210a82, + 0x242402, + 0x11a7a6c4, + 0x142, + 0xf08c7, + 0x42983, + 0xdc40d, + 0xf42c9, + 0x1280b, + 0xf8108, + 0x66fc9, + 0x122ece45, + 0x119e46, + 0x137d09, + 0x205503, + 0x1b9688, + 0x1a31c4, + 0x1d45c3, + 0x1a2345, + 0x1b9688, + 0x1dd507, + 0x13053907, + 0x1365f684, + 0x63646, + 0x1a2389, + 0xb728e, + 0xf9d4c, + 0x144207, + 0x15b5c83, + 0x13a01ac2, + 0x147849, + 0x1d5004, 0x2000c2, - 0x204502, - 0x45ea6b05, - 0x462a6305, - 0x46653946, - 
0x1c3448, - 0x46ace985, - 0x20b642, - 0x203142, - 0x46f25e05, - 0x47294c85, - 0x47695b87, - 0x47af7ec9, - 0x47fbb404, + 0x214f04, + 0x202202, + 0x206643, + 0x201482, + 0x21f603, + 0xfd03, + 0x200382, + 0x2e4084, + 0x211e43, + 0x24e442, + 0x2109c3, + 0x12282, + 0x2003c2, + 0x21f143, + 0x212fc6, + 0x33940f, + 0x7de983, + 0x1b9688, + 0x202202, + 0x3d6403, + 0x205503, + 0x206543, + 0x14fd3587, + 0x1572a46, + 0x1ee286, + 0xd9c89, + 0x153c7448, + 0x1e8684, + 0x156c3fca, + 0x7d848, + 0x16016c07, + 0x1c2588, + 0xb7288, + 0x15dcd8b, + 0x147abca, + 0x16467cc3, + 0xfac49, + 0x1690a248, + 0x16e38a47, + 0x14eb44a, + 0x1506147, + 0xb1e8b, + 0x1729e38c, + 0x164685, + 0xe0405, + 0x1231c9, + 0x1029c4, + 0x11c283, + 0x15ac4105, + 0x12c843, + 0x15e2c1c3, + 0x12c843, + 0x42982, + 0x1b42, + 0x5fc2, + 0x5fc2, + 0x1782, + 0x5fc2, + 0x2642, + 0x3402, + 0x23c2, + 0x14cb05, + 0xf08c7, + 0x1e8684, + 0x107e04, + 0x202202, + 0x206643, + 0x205503, + 0x2109c3, + 0x2000c2, + 0x2087c2, + 0x205a42, + 0x18206643, + 0x241382, + 0x21f603, + 0x200bc2, + 0x234382, + 0x205503, + 0x2062c2, + 0x2697c2, + 0x2208c2, + 0x207002, + 0x29cfc2, + 0x200802, + 0x203582, + 0x217382, + 0x20bd82, + 0x20af02, + 0x1610cc, + 0x2c7002, + 0x27e5c2, + 0x230a02, + 0x201f02, + 0x206543, + 0x201502, + 0x2109c3, + 0x239e02, + 0x246102, + 0x21f143, + 0x244242, + 0x202ec2, + 0x206ec2, + 0x200e02, + 0x203d02, + 0x2429c2, + 0x20ae42, + 0x250602, + 0x230a42, + 0x32e0ca, + 0x36fb0a, + 0x3a1a4a, + 0x3f4442, + 0x205382, + 0x369cc2, + 0x186fc749, + 0x18b547ca, + 0x1549207, + 0x18e00fc2, + 0x143bfc3, + 0x4942, + 0x1547ca, + 0x1685ce, + 0x204884, + 0x105785, + 0x19606643, + 0x42dc3, + 0x21f603, + 0x2554c4, + 0x205503, + 0x2503c4, + 0x211e43, + 0x144009, + 0x1d4086, + 0x206543, + 0xf8984, + 0x146ec3, + 0x2109c3, + 0x1f45, + 0x21d783, + 0x21f143, + 0x1445a04, + 0x237343, + 0x1994e6c4, + 0xcbd48, + 0x200f83, + 0x1b9688, + 0x3042, + 0x1533a43, + 0x1de8c6, + 0x15dde84, + 0x1d6985, + 0x1027ca, + 0x134f82, + 0x1a5dec0d, + 0x1b32c6, + 0x6f51, + 0x1aafc749, + 0x159c8a, + 0x1d6a08, + 0x8c1c8, + 0x145cce, + 0x54b13, + 0x21572d07, + 0x28c2, + 0x13a810, + 0x145acc, + 0xfc8d4, + 0xb0407, + 0x1a50e, + 0x14cb0b, + 0x14eecb, + 0x1bd04a, + 0x342c7, + 0x1b9688, + 0xb4d88, + 0x8ec7, + 0x2181ae0b, + 0x1c446, + 0x1f4c7, + 0x2fc2, + 0x10fa8d, + 0x149b45, + 0x69347, + 0x2ad8a, + 0x13e30c, + 0x13e4cf, + 0x11f64f, + 0x1547c2, + 0x2202, + 0xe9e08, + 0x21cfbc4c, + 0x1a8b0a, + 0x22361b8a, + 0xf10ca, + 0x800ca, + 0x88508, + 0x26085, + 0x6b5c8, + 0xf1588, + 0x1dd4c8, + 0x146488, + 0x23c2, + 0x11f3cf, + 0x142188d, + 0x140e4d2, + 0x1ccf8b, + 0xc9a08, + 0x38107, + 0x4e48a, + 0x12bccb, + 0xa24c9, + 0x4e387, + 0x76706, + 0x25f88, + 0x3048c, + 0x1d9d47, + 0x1caca, + 0x7908, + 0x15f00e, + 0x19028e, + 0x3410b, + 0x3e48b, + 0x3ed0b, + 0x41a09, + 0x42e4b, + 0x4334d, + 0x44d4b, + 0x4978d, + 0x49b0d, + 0x5250a, + 0x4cd8b, + 0x4d24b, + 0x52185, + 0x225c7490, + 0x2c68f, + 0x7a88f, + 0x10ff4d, + 0x57f50, + 0x4c02, + 0x22a2fd08, + 0x1d9bc8, + 0x80990, + 0x12ae8e, + 0x22f726c5, + 0x5314b, + 0x143110, + 0x59bc5, + 0xa380b, + 0x1b178c, + 0x6b6ca, + 0x3e649, + 0x6c448, + 0x72547, + 0x72887, + 0x72a47, + 0x73ac7, + 0x75207, + 0x75607, + 0x77787, + 0x77c87, + 0x78187, + 0x78507, + 0x789c7, + 0x78b87, + 0x78d47, + 0x78f07, + 0x79287, + 0x79747, + 0x7b047, + 0x7b507, + 0x7c107, + 0x7c407, + 0x7c5c7, + 0x7c8c7, + 0x7d0c7, + 0x7d2c7, + 0x7dcc7, + 0x7de87, + 0x7e047, + 0x7e447, + 0x7ea87, + 0x7f447, + 0x7ff07, + 0x80347, + 0x81087, + 0x81247, + 0x81887, + 0x81c07, + 0x82607, + 0x82a07, + 0x82d47, + 0x82f07, + 0x83347, + 
[… diff hunk continues: several thousand added/removed machine-generated uint32 table constants from a regenerated lookup table in the vendored golang.org/x/net package (version bumped in go.mod above); generated data elided …]
0x2000c2, + 0x39ab04, + 0x206643, + 0x21f603, + 0x205503, + 0x252b83, + 0x22f2c5, + 0x211e43, + 0x20f743, + 0x2109c3, + 0x22b643, + 0x21f143, + 0x213dc3, + 0x214f83, + 0x200f83, + 0xc7f03, 0x5c2, - 0x119186, - 0x19b44b, - 0x9dc6, - 0x1786c4, - 0x1a4107, - 0x64d49, - 0x191549, - 0x1c4448, - 0x5cf02, - 0x138fc9, - 0x18108, - 0xf280a, - 0xd5dc8, - 0x66ae5b8b, - 0x1bbf09, - 0x5ff86, - 0xea449, - 0xf6307, - 0xf6bc9, - 0xf9508, - 0xfad47, - 0xfc349, - 0x1011c5, - 0x101590, - 0x10210c, - 0x1c88c6, - 0x1a4045, - 0x1d95c7, - 0x4880d, - 0x36a89, - 0x672cebc3, - 0x4f585, - 0x140806, - 0x106587, - 0x10b858, - 0x1e9d08, - 0x8b1ca, - 0x23f8e, - 0x3b02, - 0x6766020b, - 0x67aea54a, - 0x1accca, - 0x72c0d, + 0x232c2, + 0x206643, + 0x21f603, + 0x205503, + 0x2109c3, + 0x21f143, + 0x2000c2, + 0x250b03, + 0x202202, + 0x23c2, + 0x21f603, + 0x205503, + 0x2503c4, + 0x2109c3, + 0x21f143, + 0x202ec2, + 0x1b9688, + 0x205503, + 0x1b9688, + 0x276803, + 0x206643, + 0x232184, + 0x21f603, + 0x205503, + 0x203042, + 0x206543, + 0x2109c3, + 0x21f143, + 0x206643, + 0x21f603, + 0x205503, + 0x203042, + 0x23ddc3, + 0x2109c3, + 0x21f143, + 0x303643, + 0x213dc3, + 0x2000c2, + 0x202202, + 0x205503, + 0x2109c3, + 0x21f143, + 0x212fc5, + 0x1f0786, + 0x72544, + 0xbdc04, + 0x214f04, + 0x23d942, + 0x882, + 0x1b9688, + 0x23c2, + 0x510c2, + 0xc642, + 0x2000c2, + 0x14cb05, + 0x20e08, + 0xb2c83, + 0x202202, + 0x3fbc4, + 0x5dd5d986, + 0x26084, + 0xba94b, + 0x3c746, + 0x82b87, + 0xa1309, + 0x21f603, + 0x4f688, + 0x4f68b, + 0x4fb0b, + 0x5088b, + 0x50bcb, + 0x50e8b, + 0x512cb, + 0x1c1b46, + 0x205503, + 0x1c9f45, + 0x1a3504, + 0x21bd03, + 0x121787, + 0x165706, + 0x137585, + 0x2044, + 0xf28c4, + 0x2109c3, + 0x88a86, + 0x11ff04, + 0x21f143, + 0x30a904, + 0x137a47, + 0x1f0389, + 0xba708, + 0x1e6785, + 0x23dc4, + 0x1ceb44, + 0x368c3, + 0x1dea03, + 0x54146, + 0x1d7808, + 0x1aea85, + 0x1a2c89, + 0x1e143, + 0x100a86, + 0x14cb05, + 0x202202, + 0x206643, + 0x21f603, + 0x205503, + 0x206543, + 0x21d783, + 0x21f143, + 0x309b43, + 0x23d942, + 0x1b9688, + 0x206643, + 0x21f603, + 0x205503, + 0x20fc83, + 0x294744, + 0x2109c3, + 0x21f143, + 0x206643, + 0x21f603, + 0x2e4084, + 0x205503, + 0x2109c3, + 0x21f143, + 0x212fc6, + 0x21f603, + 0x205503, + 0x18903, + 0x21f143, + 0x206643, + 0x21f603, + 0x205503, + 0x2109c3, + 0x21f143, + 0x14cb05, + 0x82b87, + 0xc043, + 0x1e143, + 0x7442, + 0x1b9688, + 0x205503, + 0x206643, + 0x21f603, + 0x205503, + 0x6d7c3, + 0x176608, + 0x2109c3, + 0x21f143, + 0x61606643, + 0x21f603, + 0x2109c3, + 0x21f143, + 0x1b9688, + 0x2000c2, + 0x202202, + 0x206643, + 0x205503, + 0x2109c3, + 0x2003c2, + 0x21f143, + 0x342f07, + 0x3be44b, + 0x22c383, + 0x287b48, + 0x3d5ac7, + 0x38bac6, + 0x20d1c5, + 0x36b289, + 0x212d48, + 0x257bc9, + 0x257bd0, + 0x383c0b, + 0x3a8989, + 0x20c043, + 0x223ac9, + 0x232f46, + 0x232f4c, + 0x22c508, + 0x3ef408, + 0x3de109, + 0x2d390e, + 0x23c18b, + 0x2c430c, + 0x2028c3, + 0x27cdcc, + 0x2028c9, + 0x315a07, + 0x235fcc, + 0x2c5d0a, + 0x204884, + 0x2bfa0d, + 0x27cc88, + 0x33244d, + 0x282386, + 0x25364b, + 0x3f0509, + 0x268f07, + 0x3c3a86, + 0x3d3bc9, + 0x358c8a, + 0x31ed88, + 0x309744, + 0x2c1d07, + 0x231ac7, + 0x335b04, + 0x21a504, + 0x206ac9, + 0x301889, + 0x3ceec8, + 0x2cbe45, + 0x20d785, + 0x208b46, + 0x2bf8c9, + 0x325b4d, + 0x39e788, + 0x208a47, + 0x20d248, + 0x237906, + 0x232b84, + 0x266485, + 0x3ea3c6, + 0x3ecf04, + 0x2027c7, + 0x204e4a, + 0x20eac4, + 0x2156c6, + 0x217509, + 0x21750f, + 0x2182cd, + 0x218806, + 0x220a10, + 0x220e06, + 0x2227c7, + 0x223407, + 0x22340f, + 0x223ec9, + 0x2270c6, + 0x227b47, + 0x227b48, + 
0x227e89, + 0x3c1988, + 0x31c607, + 0x229a03, + 0x22e3c6, + 0x336ac8, + 0x2d3bca, + 0x202f09, + 0x212e83, + 0x36b046, + 0x393b8a, + 0x2345c7, + 0x31584a, + 0x373e4e, + 0x224006, + 0x321d07, + 0x25e586, + 0x202986, + 0x3cb8cb, + 0x3c1c4a, + 0x3f384d, + 0x3d62c7, + 0x2fff88, + 0x2fff89, + 0x2fff8f, + 0x2b954c, + 0x381149, + 0x2bb04e, + 0x2578ca, + 0x3796c6, + 0x2fbb86, + 0x323e8c, + 0x3f158c, + 0x32b988, + 0x35e847, + 0x21c285, + 0x3cebc4, + 0x20220e, + 0x21ca44, + 0x3d3907, + 0x3b3a8a, + 0x3ebfd4, + 0x22d6cf, + 0x2235c8, + 0x22e288, + 0x20f38d, + 0x20f38e, + 0x22e709, + 0x349208, + 0x34920f, + 0x235ccc, + 0x235ccf, + 0x236d07, + 0x23a08a, + 0x23afcb, + 0x23b988, + 0x23dc87, + 0x271d8d, + 0x3022c6, + 0x2bfbc6, + 0x242509, + 0x272348, + 0x248248, + 0x24824e, + 0x26d447, + 0x30d045, + 0x24a485, + 0x21a384, + 0x38bd86, + 0x3cedc8, + 0x25f1c3, + 0x2c544e, + 0x272148, + 0x21e20b, + 0x2769c7, + 0x35f385, + 0x27cf46, + 0x2be707, + 0x34e508, + 0x375209, + 0x2329c5, + 0x295148, + 0x30f386, + 0x3b31ca, + 0x202109, + 0x236089, + 0x23608b, + 0x347308, + 0x3359c9, + 0x2c8a46, + 0x27b28a, + 0x2853ca, + 0x23a28c, + 0x273407, + 0x27a08a, + 0x3c4d0b, + 0x3c4d19, + 0x2d66c8, + 0x213045, + 0x271f46, + 0x3798c9, + 0x35df86, + 0x2e488a, + 0x2064c6, + 0x2e2504, + 0x2e250d, + 0x33b487, + 0x35ee09, + 0x24ec45, + 0x24ef08, + 0x24f449, + 0x251604, + 0x251cc7, + 0x251cc8, + 0x252007, + 0x277dc8, + 0x25ca47, + 0x269285, + 0x265b8c, + 0x265f89, + 0x32920a, + 0x268709, + 0x223bc9, + 0x268a4c, + 0x26c18b, + 0x26d008, + 0x26d948, + 0x270d04, + 0x292648, + 0x293349, + 0x2c5dc7, + 0x217746, + 0x2ad487, + 0x370d89, + 0x245dcb, + 0x3aef07, + 0x2a0887, + 0x256687, + 0x3323c4, + 0x3323c5, + 0x3ab045, + 0x35be4b, + 0x3e4bc4, + 0x2dc688, + 0x2bd0ca, + 0x30f447, + 0x3ef007, + 0x29bad2, + 0x28cf06, + 0x231386, + 0x3da74e, + 0x298b06, + 0x2a1c08, + 0x2a2c8f, + 0x332808, + 0x296988, + 0x312bca, + 0x312bd1, + 0x2b360e, + 0x27b9ca, + 0x27b9cc, + 0x25d947, + 0x349410, + 0x3d3208, + 0x2b3805, + 0x2befca, + 0x3ecf4c, + 0x20bf0d, + 0x3cd906, + 0x3cd907, + 0x3cd90c, + 0x3f3dcc, + 0x211e4c, + 0x32cf0b, + 0x3a5bc4, + 0x21d984, + 0x2c3bc9, + 0x338787, + 0x22e049, + 0x285209, + 0x2c59c7, + 0x2c5b86, + 0x2c5b89, + 0x2c5f83, + 0x2aa90a, + 0x336987, + 0x3dd24b, + 0x3f36ca, + 0x259604, + 0x3ee686, + 0x290109, + 0x3bf3c4, + 0x2ebcca, + 0x307cc5, + 0x2d5005, + 0x2d500d, + 0x2d534e, + 0x363545, + 0x341bc6, + 0x212bc7, + 0x23884a, + 0x21cd46, + 0x2f46c4, + 0x2f8c47, + 0x3e114b, + 0x2fe247, + 0x28c284, + 0x318046, + 0x31804d, + 0x2f120c, + 0x210886, + 0x39e98a, + 0x21d406, + 0x222488, + 0x23a947, + 0x2665ca, + 0x351986, + 0x28d503, + 0x3ca106, + 0x24a6c8, + 0x375d0a, + 0x29a347, + 0x29a348, + 0x29c044, + 0x28d107, + 0x3870c8, + 0x235848, + 0x2cc748, + 0x2ccb4a, + 0x2dfa85, + 0x23ddc7, + 0x27b813, + 0x286446, + 0x235ac8, + 0x225449, + 0x2d2888, + 0x360b4b, + 0x2ce4c8, + 0x30ce84, + 0x316046, + 0x32c586, + 0x39b3c9, + 0x2dfe47, + 0x265c88, + 0x36aa46, + 0x38d084, + 0x336305, + 0x3d7408, + 0x20150a, + 0x2e2188, + 0x2e7786, + 0x2a9cca, + 0x203308, + 0x3a9dc8, + 0x2ebf48, + 0x2ec4c6, + 0x2ee106, + 0x3ac9cc, + 0x2ee6d0, + 0x2eeac5, + 0x320688, + 0x320690, + 0x332610, + 0x257a4e, + 0x3ac64e, + 0x3ac654, + 0x3b0b0f, + 0x3b0ec6, + 0x3efd91, + 0x3474d3, + 0x3c3c08, + 0x3c3205, + 0x289788, + 0x3eabc5, + 0x34f10c, + 0x212349, + 0x21c889, + 0x229747, + 0x3b35c9, + 0x35db47, + 0x3a3046, + 0x266287, + 0x28b345, + 0x20b503, + 0x218903, + 0x27fb84, + 0x3d228d, + 0x3f1dcf, + 0x38d0c5, + 0x212246, + 0x3b74c7, + 0x22c287, + 0x2d0c46, + 0x2d0c4b, + 0x2b4785, + 0x21e0c6, + 
0x3b1d87, + 0x25dc49, + 0x369dc6, + 0x21e6c5, + 0x33bccb, + 0x3cd206, + 0x222b85, + 0x252c08, + 0x29d4c8, + 0x2b48cc, + 0x2b48d0, + 0x2b6f49, + 0x2c7747, + 0x2cc28b, + 0x2f6986, + 0x31c4ca, + 0x2b054b, + 0x34e74a, + 0x371946, + 0x303505, + 0x3366c6, + 0x293d08, + 0x29e14a, + 0x20f01c, + 0x309c0c, + 0x309f08, + 0x212fc5, + 0x21f807, + 0x2b2b46, + 0x2d3fc5, + 0x21b886, + 0x2d0e08, + 0x2d2e07, + 0x2d3808, + 0x28650a, + 0x2f60cc, + 0x25f449, + 0x21f247, + 0x2282c4, + 0x224606, + 0x29650a, + 0x285305, + 0x21a18c, + 0x21a848, + 0x22d0c8, + 0x22abcc, + 0x39598c, + 0x22dc09, + 0x22de47, + 0x24744c, + 0x233dc4, + 0x24b48a, + 0x217d0c, + 0x28274b, + 0x39450b, + 0x3a6386, + 0x25c1c7, + 0x25d447, + 0x34964f, + 0x317051, + 0x2f37d2, + 0x25d44d, + 0x25d44e, + 0x25d78e, + 0x3b0cc8, + 0x3b0cd2, + 0x241848, + 0x2501c7, + 0x256eca, + 0x24b2c8, + 0x298ac5, + 0x3ba8ca, + 0x221347, + 0x2e3184, + 0x24e583, + 0x38ff05, + 0x312e47, + 0x2f9947, + 0x20c10e, + 0x31618d, + 0x317d09, + 0x20e7c5, + 0x326403, + 0x344206, + 0x26a945, + 0x21e448, + 0x33c149, + 0x271f85, + 0x271f8f, + 0x2baa47, + 0x20d045, + 0x27738a, + 0x20ae06, + 0x2a8c49, + 0x35964c, + 0x37e909, + 0x2125c6, + 0x2bcecc, + 0x37f846, + 0x3e6888, + 0x315546, + 0x27aec6, + 0x2ca404, + 0x222383, + 0x2dfbca, + 0x29cad1, + 0x38130a, + 0x265745, + 0x268287, + 0x262a47, + 0x2d0044, + 0x3871cb, + 0x294448, + 0x2d0a06, + 0x233605, + 0x273d04, + 0x275389, + 0x2008c4, + 0x3ed887, + 0x387e05, + 0x387e07, + 0x3da985, + 0x260ac3, + 0x250088, + 0x277a0a, + 0x204803, + 0x22c48a, + 0x204806, + 0x271d0f, + 0x26d3c9, + 0x2c53d0, + 0x3a7648, + 0x2e7c89, + 0x2a7907, + 0x317fcf, + 0x334bc4, + 0x2e4104, + 0x220c86, + 0x3b6d46, + 0x34fd4a, + 0x273706, + 0x2c28c7, + 0x31c948, + 0x31cb47, + 0x31dc47, + 0x320bca, + 0x31e64b, + 0x302045, + 0x2f3408, + 0x21ff83, + 0x3d118c, + 0x21c00f, + 0x23cc0d, + 0x29ab07, + 0x22ce09, + 0x284107, + 0x2d91c8, + 0x3ec1cc, + 0x30cd88, + 0x24d408, + 0x33820e, + 0x34ba94, + 0x34bfa4, + 0x3672ca, + 0x38420b, + 0x35dc04, + 0x35dc09, + 0x2cba08, + 0x24b745, + 0x3d58ca, + 0x296287, + 0x22ffc4, + 0x250b03, + 0x206643, + 0x2392c4, + 0x21f603, + 0x205503, + 0x2503c4, + 0x211e43, + 0x206543, + 0x2ee6c6, + 0x294744, + 0x2109c3, + 0x21f143, + 0x219683, + 0x2000c2, + 0x250b03, + 0x202202, + 0x206643, + 0x2392c4, + 0x21f603, + 0x205503, + 0x211e43, + 0x2ee6c6, + 0x2109c3, + 0x21f143, + 0x1b9688, + 0x206643, + 0x21f603, + 0x3d6403, + 0x2109c3, + 0x21f143, + 0x250b03, + 0x1b9688, + 0x206643, + 0x21f603, + 0x205503, + 0x206543, + 0x294744, + 0x2109c3, + 0x21f143, + 0x2000c2, + 0x38a7c3, + 0x202202, + 0x21f603, + 0x205503, + 0x206543, + 0x2109c3, + 0x21f143, + 0x201782, + 0x202dc2, + 0x202202, + 0x206643, + 0x211ac2, + 0x2005c2, + 0x2503c4, + 0x228f84, + 0x21e002, + 0x294744, + 0x2003c2, + 0x21f143, + 0x219683, + 0x3a6386, + 0x224b42, + 0x202642, + 0x225842, + 0x63e03ec3, + 0x64255983, + 0x63586, + 0x63586, + 0x214f04, + 0x21d783, + 0x1dec0d, + 0x1cec4a, + 0x1a1246, + 0x1d01cc, + 0x64f1f14d, + 0x8f28c, + 0x6545484f, + 0x1d8f0d, + 0x79184, + 0x169044, + 0xcdc84, + 0x14cb05, + 0x95709, + 0xa0fcc, + 0x342c7, + 0x12ac6, + 0x19288, + 0x1f4c7, + 0x24988, + 0x1bb4ca, + 0x11b487, + 0xa1209, + 0x65ad45c5, + 0xf48c9, + 0x65c37e4b, + 0x1511cb, + 0x2c4b, + 0x172bc8, + 0x16128a, + 0x17c88e, + 0x660b74ca, + 0xe35cd, + 0x2e70d, + 0x14d268b, + 0xf10ca, + 0x26084, + 0x8a646, + 0x1896c8, + 0xc9a08, + 0x38107, + 0x26e45, + 0x1e3b07, + 0xa24c9, + 0x1d9d47, + 0x7908, + 0x10f849, + 0x60a04, + 0x685c5, + 0x15440e, + 0x1455c7, + 0x666271c6, + 0xbc84d, + 0x1d9bc8, + 0xf3008, + 0x66a80986, + 
0x674b2788, + 0x182c0a, + 0x64348, + 0x143110, + 0x6048c, + 0x72c07, + 0x74107, + 0x79c87, + 0x7fa47, + 0x8b02, + 0x12a387, + 0x1c1e0c, + 0x14d05, + 0xcc107, + 0xb6e06, + 0xb78c9, + 0xbac08, + 0x15fc2, + 0x5c2, + 0x116a86, + 0x194e0b, + 0x173cc6, + 0x1de684, + 0x1cf8c7, + 0x80789, + 0x1e0b49, + 0x1ba688, + 0x510c2, + 0x19a989, + 0x11508, + 0xf0b8a, + 0xceb48, + 0x67ae098b, + 0x1db9c9, + 0x4b206, + 0xe5a49, + 0xf1047, + 0xf1909, + 0xf2a48, + 0xf4087, + 0xf5a49, + 0xf8e05, + 0xf91d0, + 0xf9d4c, + 0x181b86, + 0x1cf805, + 0xd9807, + 0x4350d, + 0x1b77c9, + 0x682c88c3, + 0x47185, + 0x1cbd46, + 0x104ac7, + 0x10a918, + 0x1da0c8, + 0x8624a, + 0x1c58e, + 0x10002, + 0x6865228b, + 0x68ae5b4a, + 0x1942ca, + 0x6584d, 0x1042, - 0xe39c6, - 0x1b506, - 0xc32c8, - 0xc0e0a, - 0x5e0c8, - 0x1c3309, - 0x11e848, - 0x7ca4e, - 0x19d08, - 0x146087, - 0x67ea7384, - 0xd708d, - 0xd2a48, - 0x115785, - 0x14c8, - 0x42248, - 0x680268ca, - 0x686bad88, - 0x11b446, - 0xba42, - 0xd77c4, - 0x7ad46, - 0x68923d88, - 0x5f986, - 0x690c6b0b, - 0x79c2, - 0x64244e04, - 0x27e83, - 0x1b9789, - 0x1f88, - 0x14a587, - 0x359ca, - 0xc0b47, + 0xdd0c6, + 0x15d46, + 0xc20c8, + 0xba0ca, + 0x5a3c8, + 0x1b9549, + 0x11d908, + 0x74c8e, + 0x6308, + 0x144207, + 0x68eb26c4, + 0xcfc4d, + 0xcbd48, + 0x113845, + 0x146f48, + 0x69381f09, + 0x371c8, + 0x6941f74a, + 0x4042, + 0x69ab24c8, + 0x119e46, + 0x5fc2, + 0xd0504, + 0x74b46, + 0x69d23b48, + 0x54146, + 0x6a4de50b, + 0x3642, + 0x6523ab84, + 0x21943, + 0x16b449, + 0x1908, + 0x2547, + 0x2c0ca, + 0x71687, 0x401, 0x81, - 0x18f607, - 0x11a408, - 0xcd208, - 0xcd408, - 0xcd608, - 0x73d47, - 0xb98c3, - 0x65e44e04, - 0x662aba43, + 0x188647, + 0x117e48, + 0xc70c8, + 0xc72c8, + 0xc74c8, + 0x6cbc7, + 0xa8643, + 0x66e3ab84, + 0x672d1fc3, 0xc1, - 0xfeb46, + 0xfc986, 0xc1, 0x201, - 0xfeb46, - 0xb98c3, - 0x66e642c4, - 0x197b44, - 0x20f85, - 0x166d85, - 0x1a4244, - 0x1a184, - 0x5d344, - 0x1416c47, - 0x1464207, - 0x1cd6c8, - 0x161e4c, + 0xfc986, + 0xa8643, + 0x67e4ac44, + 0x190d04, + 0xe985, + 0x39f45, + 0x1cfa04, + 0x6784, + 0x51504, + 0x1410087, + 0x144ab87, + 0x1c7448, + 0x1c148c, 0xc01, - 0x12f03, - 0x2af84, - 0x30705, - 0x1cd6c8, - 0x68fcd6c8, - 0x71783, - 0x83343, - 0x18ec3, - 0x1d3b47, - 0x80c7, - 0x15e2945, - 0x64744, - 0x79087, - 0xb642, - 0x166d44, - 0xc2bca, - 0x2346c4, - 0x21a043, - 0x2607c4, - 0x25c204, - 0x214883, - 0x22d985, - 0x221c03, - 0x2423c3, - 0x33a505, + 0x14f83, + 0x1ecc4, + 0x1bd044, + 0x28d45, + 0x1c7448, + 0x6a3c7448, + 0x68f03, + 0x7e583, + 0x12e03, + 0x22607, + 0x4a07, + 0x15e5145, + 0x56344, + 0x72d47, + 0x2202, + 0x39f04, + 0x1e0f4a, + 0x204884, + 0x206643, + 0x2554c4, + 0x2503c4, + 0x2109c3, + 0x225305, + 0x219dc3, + 0x237343, + 0x33d845, 0x200f83, - 0x29f83, - 0x6a61a043, - 0x226783, - 0x607c4, - 0x7783, - 0x20f583, + 0x235c3, + 0x6ba06643, + 0x21f603, + 0x554c4, + 0x3b43, + 0x205503, 0x200181, - 0x22043, - 0x214e43, - 0x230944, - 0x225a04, - 0x214883, - 0x62243, - 0x24acc3, - 0x219303, - 0x1c3448, + 0xf743, + 0x206543, + 0x228f84, + 0x294744, + 0x2109c3, + 0x2b643, + 0x21f143, + 0x213dc3, + 0x1b9688, 0x2000c2, - 0x20bec3, - 0x20b642, - 0x21a043, - 0x226783, - 0x204703, + 0x250b03, + 0x202202, + 0x206643, + 0x21f603, + 0x3d6403, 0x2005c2, - 0x25c204, - 0x20bc83, - 0x214e43, - 0x214883, - 0x208c43, - 0x24acc3, + 0x2503c4, + 0x211e43, + 0x206543, + 0x2109c3, + 0x21d783, + 0x21f143, 0x200f83, - 0xd384, - 0x1c3448, - 0x110cc7, - 0xb642, - 0x139b05, - 0x6a44f, - 0xe6a46, - 0x10210c, - 0x1470708, - 0x11eb4e, - 0x6b602242, - 0x23b308, - 0x3d6906, - 0x2e20c6, - 0x3a4c07, - 0x6ba09582, - 0x6becbe48, 
- 0x232a8a, - 0x2789c8, + 0x1b9688, + 0x10ea47, + 0x2202, + 0x136285, + 0x605cf, + 0xe3246, + 0xf9d4c, + 0x147e248, + 0x6ca01bc2, + 0x3dae48, + 0x3cf386, + 0x2db106, + 0x39d947, + 0x6ce087c2, + 0x6d2c5248, + 0x229cca, + 0x272688, 0x200b02, - 0x33a049, - 0x3257c7, - 0x21f046, - 0x25bc09, - 0x247d84, - 0x392ec6, - 0x2e24c4, - 0x201904, - 0x26e989, - 0x31cb06, - 0x312885, - 0x27d445, - 0x239547, - 0x2cc4c7, - 0x381d44, - 0x364606, - 0x3cdf45, - 0x2201c5, - 0x23a185, - 0x251e47, - 0x3a9d05, - 0x25b709, - 0x348a05, - 0x36f184, - 0x224687, - 0x3d490e, - 0x3eb909, - 0x335fc9, - 0x3cbb86, - 0x24d7c8, - 0x37f6cb, - 0x3716cc, - 0x32e686, - 0x2cad07, - 0x2fdfc5, - 0x315a0a, - 0x3d6549, + 0x3367c9, + 0x302087, + 0x2176c6, + 0x24fdc9, + 0x23df04, + 0x38b9c6, + 0x2db504, + 0x220044, + 0x264b09, + 0x31b946, + 0x227d45, + 0x2783c5, + 0x22f007, + 0x334087, + 0x3edf44, + 0x360406, + 0x2c6485, + 0x3f31c5, + 0x22f8c5, + 0x237ac7, + 0x276805, + 0x24f8c9, + 0x3dc545, + 0x34e644, + 0x21cc87, + 0x33b00e, + 0x346ac9, + 0x3da609, + 0x3bde06, + 0x243f48, + 0x378c0b, + 0x2fec8c, + 0x32db46, + 0x2c41c7, + 0x2f8385, + 0x313aca, + 0x3cefc9, 0x200ac9, - 0x20b046, - 0x3bac85, - 0x255205, - 0x37afc9, - 0x23a30b, - 0x23ce46, - 0x35ba46, - 0x20cc44, - 0x25a746, - 0x30e588, - 0x3d7746, - 0x363b86, - 0x207408, - 0x20aa87, - 0x20ae09, - 0x20df45, - 0x1c3448, - 0x3e1284, - 0x31f6c4, - 0x2139c5, - 0x34d509, - 0x22d407, - 0x22d40b, - 0x22e5ca, - 0x231045, - 0x6c208982, - 0x220587, - 0x6c6327c8, - 0x3791c7, - 0x3b1405, - 0x2457ca, - 0xb642, - 0x29258b, - 0x29410a, - 0x27c446, - 0x211343, - 0x21784d, - 0x2090cc, - 0x21ce8d, - 0x246405, - 0x3ab085, - 0x26b387, - 0x22c209, - 0x232986, - 0x26a885, - 0x315448, - 0x25a643, - 0x2ef3c8, - 0x25a648, - 0x3cb287, - 0x3f39c8, - 0x362349, - 0x2e0d87, - 0x23a807, - 0x3e27c8, - 0x380384, - 0x380387, - 0x2873c8, - 0x36d0c6, - 0x3d144f, - 0x238107, - 0x364106, - 0x239cc5, - 0x22e043, - 0x257347, - 0x396b43, - 0x25e506, - 0x264646, - 0x266206, - 0x2a7c85, - 0x27c883, - 0x338c48, - 0x3ab5c9, - 0x26638b, - 0x266748, - 0x267645, - 0x269105, - 0x6ca698c2, - 0x367589, - 0x3d9307, - 0x215ac5, - 0x26e887, - 0x26fe86, - 0x270ac5, - 0x27224b, - 0x274104, - 0x278585, - 0x2786c7, - 0x28d046, - 0x28d405, - 0x299b47, - 0x29a787, - 0x383744, - 0x26248a, - 0x2beec8, - 0x37f989, - 0x349405, - 0x27c006, - 0x30e74a, - 0x27d346, - 0x320dc7, - 0x27f34d, - 0x2bbd89, - 0x38ae05, - 0x3d4e47, - 0x20c848, - 0x20cd08, - 0x3485c7, - 0x335c86, - 0x215f87, - 0x26e303, - 0x31ca84, - 0x38c645, - 0x3b9c87, - 0x3c4049, - 0x22c048, - 0x320cc5, - 0x240244, - 0x25a985, - 0x39080d, - 0x20ad02, - 0x2264c6, - 0x2cdb86, - 0x31050a, - 0x3a8b46, - 0x3b6905, - 0x3938c5, - 0x3938c7, - 0x3bda0c, - 0x2b358a, - 0x2a22c6, - 0x20d2c5, - 0x25a586, - 0x2a3087, - 0x2a57c6, - 0x2a7b8c, - 0x25bd49, - 0x6ce4dcc7, - 0x2aa845, - 0x2aa846, - 0x2aac08, - 0x25a205, - 0x2bc5c5, - 0x2bca88, - 0x2bcc8a, - 0x6d21dd82, - 0x6d610682, - 0x21e645, - 0x244583, - 0x33ef48, - 0x21ffc3, - 0x2bcf04, - 0x2ab60b, - 0x371ac8, - 0x39fcc8, - 0x6db5cb49, - 0x2c2fc9, - 0x2c3886, - 0x2c5008, - 0x2c5209, - 0x2c6dc6, - 0x2c6f45, - 0x259106, - 0x2c7409, - 0x2da7c7, - 0x253286, - 0x359787, - 0x3a9987, - 0x3a8104, - 0x6dfe2609, - 0x2dad48, - 0x2cbd48, - 0x3947c7, - 0x2e7d46, - 0x208ec9, - 0x2e2787, - 0x347c4a, - 0x3c8188, - 0x27c147, - 0x2efac6, - 0x2a174a, - 0x3db8c8, - 0x2ed305, - 0x2c11c5, - 0x30b487, - 0x38ed49, - 0x32258b, - 0x338108, - 0x348a89, - 0x266a47, - 0x2d5fcc, - 0x2d668c, - 0x2d698a, - 0x2d6c0c, - 0x2e2048, - 0x2e2248, - 0x2e2444, - 0x2e3189, - 0x2e33c9, - 0x2e360a, - 
0x2e3889, - 0x2e3c07, - 0x34c88c, - 0x379586, - 0x27ed08, - 0x27d406, - 0x2f5fc6, - 0x38ad07, - 0x39d088, - 0x39348b, - 0x20b147, - 0x26f209, - 0x296bc9, - 0x24e507, - 0x2e2704, - 0x367787, - 0x2cf0c6, - 0x2194c6, - 0x21ca45, - 0x2c2dc8, - 0x3618c4, - 0x3618c6, - 0x2b344b, - 0x28dd09, - 0x251d46, - 0x22c4c9, - 0x213a86, - 0x39ac08, - 0x20a243, - 0x3bae05, - 0x222149, - 0x3cb985, - 0x302884, - 0x390f86, - 0x3bcbc5, - 0x26a686, - 0x323247, - 0x2fe346, - 0x3bb84b, - 0x2800c7, - 0x28d706, - 0x2921c6, - 0x239606, - 0x381d09, - 0x2d394a, - 0x2fac05, - 0x24bd4d, - 0x2bcd86, - 0x2d4b86, - 0x229086, - 0x3d3945, - 0x301887, - 0x268c87, - 0x31ce8e, - 0x214e43, - 0x2e7d09, - 0x3bf489, - 0x315e07, - 0x281887, - 0x3d3845, - 0x2d0b05, - 0x6e3ed28f, - 0x2ece47, - 0x2ed008, - 0x2edc44, - 0x2eddc6, - 0x6e654782, - 0x2f1bc6, - 0x2f3c06, - 0x10210c, - 0x20220e, - 0x2ef20a, - 0x232146, - 0x21978a, - 0x206b89, - 0x249205, - 0x32a108, - 0x3e3e46, - 0x2ca908, - 0x379b48, - 0x2911cb, - 0x3a4d05, - 0x3a9d88, - 0x20754c, - 0x3b12c7, - 0x265d86, - 0x3bca08, - 0x393148, - 0x6ea18882, - 0x392c8b, - 0x20e149, - 0x20a049, - 0x3b7f47, - 0x3666c8, - 0x6efeae08, - 0x25fbcb, - 0x257a09, - 0x26fb4d, - 0x32cd48, - 0x2d7dc8, - 0x6f201802, - 0x222ac4, - 0x6f6445c2, - 0x36d586, - 0x6fa03282, - 0x308fca, - 0x273186, - 0x22f0c8, - 0x25c4c8, - 0x26b9c6, - 0x2cb206, - 0x30f086, - 0x3a2185, - 0x2468c4, - 0x6ff8dd84, - 0x360946, - 0x269e87, - 0x70290607, - 0x39f3cb, - 0x3793c9, - 0x3ab0ca, - 0x393a04, - 0x2e2a48, - 0x25304d, - 0x3098c9, - 0x309b08, - 0x309d89, - 0x30b844, - 0x2bc884, - 0x295a45, - 0x253c8b, - 0x371a46, - 0x360785, - 0x248449, - 0x3646c8, - 0x270b04, - 0x315b89, - 0x3bb785, - 0x2cc508, - 0x23aec7, - 0x3363c8, - 0x296506, - 0x213dc7, - 0x2f8dc9, - 0x3ecd49, - 0x21b6c5, - 0x2462c5, - 0x70623cc2, - 0x36ef44, - 0x2fcc45, - 0x3a4b06, - 0x328085, - 0x2691c7, - 0x328645, - 0x28a984, - 0x3cbc46, - 0x26a907, - 0x2547c6, - 0x371f85, - 0x21dfc8, - 0x3d6b05, - 0x221fc7, - 0x22c6c9, - 0x28de4a, - 0x234787, - 0x23478c, - 0x312846, - 0x24ab49, - 0x252105, - 0x25a148, - 0x21b503, - 0x2cdc45, - 0x2fc705, - 0x243007, - 0x70a00b82, - 0x305cc7, - 0x2d7386, - 0x389c86, - 0x2e8f06, - 0x393086, - 0x256848, - 0x28f845, - 0x3641c7, - 0x3641cd, - 0x220203, - 0x379a45, - 0x3aa647, - 0x306008, - 0x3aa205, - 0x221708, - 0x230406, - 0x2aba87, - 0x2f5f05, - 0x3a4d86, - 0x3391c5, - 0x35388a, - 0x2fb186, - 0x2dda87, - 0x2350c5, - 0x2fdb87, - 0x300f84, - 0x302806, - 0x32a045, - 0x23628b, - 0x2cef49, - 0x36838a, - 0x21b748, - 0x30c4c8, - 0x30e14c, - 0x313c87, - 0x3166c8, - 0x35f308, - 0x36d6c5, - 0x322bca, - 0x3291c9, - 0x70e01b82, - 0x2a8506, - 0x22bbc4, - 0x22bbc9, - 0x2868c9, - 0x31b047, - 0x284407, - 0x28c689, - 0x2d3388, - 0x2d338f, - 0x216986, - 0x2f594b, - 0x272085, - 0x272087, - 0x33fc89, - 0x22ce06, - 0x315b07, - 0x2fa6c5, - 0x23e6c4, - 0x358386, - 0x21cdc4, - 0x2d2e87, - 0x33d8c8, - 0x713bab88, - 0x3c18c5, - 0x3ccb87, - 0x2dcf49, - 0x21c584, - 0x24f148, - 0x7170b2c8, - 0x2d4b04, - 0x312f48, - 0x20c5c4, - 0x21cc89, - 0x228fc5, - 0x71a49902, - 0x2169c5, - 0x2d6545, - 0x20cb48, - 0x241bc7, - 0x71e008c2, - 0x3b8405, - 0x2f0386, - 0x266086, - 0x36ef08, - 0x346a48, - 0x328046, - 0x32c486, - 0x310d89, - 0x389bc6, - 0x22cccb, - 0x351f85, - 0x3ae706, - 0x35a188, - 0x36ebc6, - 0x2a9b46, - 0x222dca, - 0x283c4a, - 0x390b05, - 0x27c587, - 0x257e46, - 0x72207882, - 0x3aa787, - 0x3e2105, - 0x30e6c4, - 0x30e6c5, - 0x2e2946, - 0x328bc7, - 0x2271c5, - 0x283dc4, - 0x3cdc48, - 0x2a9c05, - 0x2fbbc7, - 0x334e05, - 0x3537c5, - 0x231704, - 0x231709, - 0x3cdd88, - 
0x201106, - 0x21a506, - 0x20dbc6, - 0x726c8fc8, - 0x30c347, - 0x34664d, - 0x3de28c, - 0x3e6d89, - 0x31b5c9, - 0x72b84342, - 0x3e60c3, - 0x24e603, - 0x2cf185, - 0x3b9d8a, - 0x346486, - 0x3afd85, - 0x323644, - 0x32364b, - 0x33d18c, - 0x33dacc, - 0x33ddd5, - 0x33eb0d, - 0x34098f, - 0x340d52, - 0x3411cf, - 0x341592, - 0x341a13, - 0x341ecd, - 0x34248d, - 0x34280e, - 0x34310e, - 0x34380c, - 0x343bcc, - 0x34400b, - 0x344a8e, - 0x345392, - 0x34624c, - 0x346c50, - 0x355292, - 0x35648c, - 0x356b4d, - 0x356e8c, - 0x35ad11, - 0x35bbcd, - 0x35db8d, - 0x35e18a, - 0x35e40c, - 0x35fccc, - 0x36048c, - 0x36100c, - 0x366ed3, - 0x368610, - 0x368a10, - 0x36908d, - 0x36968c, - 0x36c249, - 0x36fb4d, - 0x36fe93, - 0x3724d1, - 0x372cd3, - 0x37380f, - 0x373bcc, - 0x373ecf, - 0x37428d, - 0x37488f, - 0x374c50, - 0x3756ce, - 0x37b80e, - 0x37c150, - 0x37cd0d, - 0x37d68e, - 0x37da0c, - 0x37f153, - 0x38154e, - 0x381f50, - 0x382351, - 0x38278f, - 0x382b53, - 0x383ecd, - 0x38420f, - 0x3845ce, - 0x384b50, - 0x384f49, - 0x386310, - 0x38680f, - 0x386e8f, - 0x387252, - 0x387e8e, - 0x38984d, - 0x389e0d, - 0x38a14d, - 0x38af4d, - 0x38b28d, - 0x38b5d0, - 0x38b9cb, - 0x38c40c, - 0x38c78c, - 0x38cd8c, - 0x38d08e, - 0x39ae10, - 0x39c9d2, - 0x39ce4b, - 0x39d28e, - 0x39d60e, - 0x39de8e, - 0x39e40b, - 0x72f9e796, - 0x3a074d, - 0x3a1714, - 0x3a24cd, - 0x3a4355, - 0x3a644d, - 0x3a6dcf, - 0x3a770f, - 0x3ab80f, - 0x3abbce, - 0x3abf4d, - 0x3ad911, - 0x3b240c, - 0x3b270c, - 0x3b2a0b, - 0x3b2ccc, - 0x3b344f, - 0x3b3812, - 0x3b3e0d, - 0x3b504c, - 0x3b594c, - 0x3b5c4d, - 0x3b5f8f, - 0x3b634e, - 0x3b9a4c, - 0x3ba00d, - 0x3ba34b, - 0x3bc18c, - 0x3bd08d, - 0x3bd3ce, - 0x3bd749, - 0x3be713, - 0x3c014d, - 0x3c084d, - 0x3c0e4c, - 0x3c14ce, - 0x3c1a0f, - 0x3c1dcc, - 0x3c20cd, - 0x3c240f, - 0x3c27cc, - 0x3c2dcc, - 0x3c3b0c, - 0x3c3e0c, - 0x3c500d, - 0x3c5352, - 0x3c5dcc, - 0x3c60cc, - 0x3c63d1, - 0x3c680f, - 0x3c6bcf, - 0x3c6f93, - 0x3c8d8e, - 0x3c910f, - 0x3c94cc, - 0x733c9b8e, - 0x3c9f0f, - 0x3ca2d6, - 0x3cadd2, - 0x3ce68c, - 0x3cf1cf, - 0x3cf84d, - 0x3dd30f, - 0x3dd6cc, - 0x3dd9cd, - 0x3ddd0d, - 0x3df8ce, - 0x3e040c, - 0x3e2c4c, - 0x3e2f50, - 0x3e5451, - 0x3e588b, - 0x3e5ccc, - 0x3e5fce, - 0x3e72d1, - 0x3e770e, - 0x3e7a8d, - 0x3f074b, - 0x3f170f, - 0x3f20d4, - 0x216e82, - 0x216e82, - 0x206c43, - 0x216e82, - 0x206c43, - 0x216e82, - 0x214f82, - 0x259145, - 0x3e6fcc, - 0x216e82, - 0x216e82, - 0x214f82, - 0x216e82, - 0x2ac905, - 0x28de45, - 0x216e82, - 0x216e82, - 0x202642, - 0x2ac905, - 0x33f149, - 0x3721cc, - 0x216e82, - 0x216e82, - 0x216e82, - 0x216e82, - 0x259145, - 0x216e82, - 0x216e82, - 0x216e82, - 0x216e82, - 0x202642, - 0x33f149, - 0x216e82, - 0x216e82, - 0x216e82, - 0x28de45, - 0x216e82, - 0x28de45, - 0x3721cc, - 0x3e6fcc, - 0x20bec3, - 0x21a043, - 0x226783, - 0x20f583, - 0x25c204, - 0x214883, - 0x24acc3, - 0x5e8f, - 0x1dc8, - 0x7cb84, - 0xeaec8, - 0x2000c2, - 0x7420b642, - 0x24e8c3, - 0x23eb44, + 0x2fbfc6, + 0x3b1b45, + 0x24b105, + 0x375009, + 0x22fa4b, + 0x3cc986, + 0x357686, + 0x208a44, + 0x252946, + 0x30d0c8, + 0x3d00c6, + 0x27da46, + 0x204248, + 0x205a47, + 0x206889, + 0x207405, + 0x1b9688, + 0x3e3a84, + 0x31e2c4, + 0x20d605, + 0x34a809, + 0x20da07, + 0x20da0b, + 0x22620a, + 0x229685, + 0x6d605182, + 0x3f3587, + 0x6da29a08, + 0x3c5807, + 0x2df745, + 0x23b3ca, + 0x2202, + 0x28b00b, + 0x28d58a, + 0x2778c6, + 0x35f383, + 0x20374d, + 0x3d7ccc, + 0x20dc8d, + 0x236505, + 0x211185, + 0x25f207, + 0x218d49, + 0x229bc6, + 0x273585, + 0x32ac08, + 0x23a783, + 0x2e44c8, + 0x252848, + 0x3c4987, + 0x23a788, + 0x23e289, + 0x37d047, + 0x3bdfc7, + 
0x3e4fc8, + 0x211884, + 0x211887, + 0x282288, + 0x367e86, + 0x3c5fcf, + 0x244bc7, + 0x35ff06, + 0x22df85, + 0x2259c3, + 0x24d0c7, + 0x38f643, + 0x252fc6, + 0x256246, + 0x259a46, + 0x29fe05, + 0x277dc3, + 0x39a608, + 0x3a3889, + 0x25a08b, + 0x25b588, + 0x25c705, + 0x25e805, + 0x6de596c2, + 0x266349, + 0x3d1c87, + 0x21e145, + 0x264a07, + 0x266e86, + 0x267e45, + 0x26a78b, + 0x26d004, + 0x271845, + 0x271987, + 0x285a46, + 0x286785, + 0x292a87, + 0x292fc7, + 0x2c6b84, + 0x2a0dca, + 0x2b76c8, + 0x378ec9, + 0x320105, + 0x275a06, + 0x30d28a, + 0x2782c6, + 0x3ec587, + 0x27a40d, + 0x2b42c9, + 0x384545, + 0x3c3f07, + 0x3db508, + 0x3dbec8, + 0x33abc7, + 0x3c2e06, + 0x216107, + 0x2556c3, + 0x31b8c4, + 0x385d45, + 0x3b0207, + 0x3ba289, + 0x225dc8, + 0x3ec485, + 0x273844, + 0x24f305, + 0x25ea4d, + 0x207002, + 0x2d6246, + 0x2c8946, + 0x30e2ca, + 0x3a1606, + 0x3adc85, + 0x38c3c5, + 0x38c3c7, + 0x3b300c, + 0x2641ca, + 0x29b046, + 0x2e0dc5, + 0x252786, + 0x29b907, + 0x29de46, + 0x29fd0c, + 0x24ff09, + 0x6e244447, + 0x2a3045, + 0x2a3046, + 0x2a3548, + 0x249685, + 0x2b4f85, + 0x2b5708, + 0x2b590a, + 0x6e60bd82, + 0x6ea09b02, + 0x2afc05, + 0x31c703, + 0x31df88, + 0x285e43, + 0x2b5b84, + 0x2a8d8b, + 0x2b90c8, + 0x333d88, + 0x6ef4da49, + 0x2bbe09, + 0x2bc746, + 0x2be388, + 0x2be589, + 0x2c0206, + 0x2c0385, + 0x24e186, + 0x2c0809, + 0x2d7ec7, + 0x255206, + 0x358587, + 0x358e47, + 0x3a0e04, + 0x6f3e4e09, + 0x2d4208, + 0x2c5148, + 0x38d2c7, + 0x2e1e06, + 0x3cc189, + 0x2db7c7, + 0x3af3ca, + 0x3edb48, + 0x2775c7, + 0x2e4d86, + 0x3e0d4a, + 0x347c08, + 0x2e8845, + 0x2ba485, + 0x30a547, + 0x318549, + 0x318a4b, + 0x32ee08, + 0x3dc5c9, + 0x25bc07, + 0x2ced4c, + 0x2cf24c, + 0x2cf54a, + 0x2cf7cc, + 0x2db088, + 0x2db288, + 0x2db484, + 0x2dc889, + 0x2dcac9, + 0x2dcd0a, + 0x2dcf89, + 0x2dd307, + 0x3c5bcc, + 0x3cab06, + 0x279dc8, + 0x278386, + 0x318406, + 0x384447, + 0x396bc8, + 0x38bf8b, + 0x2fc0c7, + 0x2647c9, + 0x2683c9, + 0x28f507, + 0x2db744, + 0x267307, + 0x2e9246, + 0x213f86, + 0x34ff05, + 0x230e48, + 0x35da44, + 0x35da46, + 0x26408b, + 0x2aac09, + 0x2379c6, + 0x226509, + 0x20d6c6, + 0x3878c8, 0x207783, - 0x20f584, - 0x23c686, - 0x213883, - 0x390e44, - 0x2b1b85, - 0x214e43, - 0x214883, - 0x24acc3, - 0x228d8a, - 0x3ae506, - 0x39d98c, - 0x1c3448, - 0x20b642, - 0x21a043, - 0x226783, - 0x20f583, - 0x2384c3, - 0x2f3c06, - 0x214883, - 0x24acc3, - 0x205443, - 0x15ac3, - 0xbe988, - 0x74d4c5c5, - 0x833c7, - 0x5bec5, - 0x1f2c7, - 0x14f985, - 0xaa204, - 0xaa20a, - 0x6249, - 0x2142, - 0x1d02ca, - 0x75b26305, - 0x14f985, - 0x133907, - 0x19c08, - 0x1048e, - 0x9dd52, - 0x13990b, - 0x11c746, - 0x75edb105, - 0x762db10c, - 0x1e2287, - 0xf5d87, - 0x1dafca, - 0x48cd0, - 0x153ac5, - 0xbe6cb, - 0xd5448, - 0x42b87, - 0x3e7cb, - 0xa9cc9, - 0x59307, - 0x1e9987, - 0x3d087, - 0x42ac6, - 0xa3c8, - 0x76838286, - 0x5e007, - 0x30546, - 0xc398d, - 0xe2bd0, - 0x76c0a842, - 0x1e9808, - 0x19a110, - 0x19a84c, - 0x773a1bcd, - 0x727c8, - 0x72f4b, - 0x809c7, - 0xd1709, - 0x6d446, - 0xaae08, - 0x1742, - 0x70d0a, - 0x38447, - 0x19fb07, - 0xbf0c9, - 0xc1bc8, - 0x100185, - 0xe9c7, - 0x119186, - 0x9dc6, - 0x10aa0e, - 0x4c48e, - 0x164d8f, - 0x64d49, - 0x191549, - 0xabe4b, - 0xded0f, - 0x183bcc, - 0xdd68b, - 0x14de08, - 0x19f2c7, - 0x1ad388, - 0xc880b, - 0xc91cc, - 0xc95cc, - 0xc99cc, - 0xc9ccd, - 0x1c4448, - 0x83382, - 0x138fc9, - 0xab348, - 0xe578b, - 0xe7f46, - 0xefc4b, - 0x144ecb, - 0xf9e0a, - 0xfaf05, - 0x101590, - 0x104606, - 0x1cbd86, - 0x1a4045, - 0x1d95c7, - 0xf1e48, - 0x106587, - 0x106847, - 0xa79c7, - 0x29a86, - 0x16d3ca, - 0xb3c0a, - 0x1b506, - 0x19f8cd, 
- 0x5e0c8, - 0x11e848, - 0xf12c9, - 0x8af89, - 0xd4905, - 0x16018c, - 0xc9ecb, - 0x1c7bc9, - 0x18ec84, - 0x11b209, - 0x11b446, - 0x11506, - 0x3342, - 0x5f986, - 0x8b10b, - 0x127647, - 0x127807, - 0x79c2, - 0xe9545, - 0x19d84, + 0x3b1cc5, + 0x2140c9, + 0x205805, + 0x2fa4c4, + 0x244946, + 0x27db85, + 0x260806, + 0x323007, + 0x3beec6, + 0x234bcb, + 0x27b187, + 0x289046, + 0x292e46, + 0x22f0c6, + 0x3edf09, + 0x2b3d4a, + 0x36d405, + 0x24514d, + 0x2b5a06, + 0x2d00c6, + 0x3a7546, + 0x222405, + 0x2f94c7, + 0x27c247, + 0x31bcce, + 0x206543, + 0x2e1dc9, + 0x3a2949, + 0x313ec7, + 0x27c707, + 0x2359c5, + 0x37b205, + 0x6f606c0f, + 0x2e7ec7, + 0x2e8088, + 0x2e84c4, + 0x2e8706, + 0x6fa4a502, + 0x2ec746, + 0x2ee6c6, + 0xf9d4c, + 0x201b8e, + 0x2e430a, + 0x203bc6, + 0x211b8a, + 0x3cb2c9, + 0x23b845, + 0x371b48, + 0x316446, + 0x2c3dc8, + 0x300308, + 0x294b8b, + 0x39da45, + 0x276888, + 0x20438c, + 0x2df607, + 0x256e06, + 0x2e4ac8, + 0x38bc48, + 0x6fe12282, + 0x3d4dcb, + 0x34f589, + 0x38b809, + 0x207607, + 0x3cbb88, + 0x7023eb48, + 0x332a8b, + 0x254389, + 0x266b4d, + 0x34d4c8, + 0x2d0708, + 0x70601582, + 0x21e7c4, + 0x70a232c2, + 0x36cf46, + 0x70e0b7c2, + 0x30794a, + 0x26c006, + 0x226b88, + 0x250688, + 0x261bc6, + 0x2c46c6, + 0x30db86, + 0x21e3c5, + 0x23bd04, + 0x71387844, + 0x35cbc6, + 0x259ec7, + 0x71688b47, + 0x3979cb, + 0x3c5a09, + 0x2111ca, + 0x38c504, + 0x2dba88, + 0x254fcd, + 0x308249, + 0x308488, + 0x308709, + 0x30a904, + 0x241744, + 0x283905, + 0x3b644b, + 0x2b9046, + 0x35ca05, + 0x390089, + 0x3604c8, + 0x267e84, + 0x313c49, + 0x234b05, + 0x3340c8, + 0x3daa07, + 0x320d88, + 0x290306, + 0x3c1847, + 0x2f2689, + 0x33be49, + 0x222c05, + 0x257445, + 0x71a1cd02, + 0x34e404, + 0x2f6345, + 0x39d846, + 0x383305, + 0x25e8c7, + 0x2daac5, + 0x283e04, + 0x3bdec6, + 0x273607, + 0x24a546, + 0x3b2185, + 0x20c888, + 0x3cf585, + 0x20f6c7, + 0x21ce89, + 0x2aad4a, + 0x226707, + 0x22670c, + 0x227d06, + 0x23f509, + 0x247cc5, + 0x2495c8, + 0x215d43, + 0x2c8a05, + 0x2f5e05, + 0x290b47, + 0x71e00b82, + 0x304207, + 0x2dd686, + 0x3e3406, + 0x2e8586, + 0x38bb86, + 0x24cbc8, + 0x2898c5, + 0x35ffc7, + 0x35ffcd, + 0x24e583, + 0x3cafc5, + 0x277147, + 0x304548, + 0x276d05, + 0x2198c8, + 0x228a46, + 0x316b87, + 0x2f44c5, + 0x39dac6, + 0x39ab85, + 0x3ccd4a, + 0x2f6ac6, + 0x2d7647, + 0x227a05, + 0x2f7f47, + 0x2f8bc4, + 0x2fa446, + 0x371a85, + 0x22c98b, + 0x2e90c9, + 0x38a8ca, + 0x222c88, + 0x30b588, + 0x310b8c, + 0x311407, + 0x314788, + 0x35c508, + 0x36d085, + 0x329a4a, + 0x326409, + 0x72201982, + 0x2a0686, + 0x230c84, + 0x230c89, + 0x2286c9, + 0x319a47, + 0x27f887, + 0x285089, + 0x2ccd48, + 0x2ccd4f, + 0x216b06, + 0x2f048b, + 0x25f9c5, + 0x25f9c7, + 0x354fc9, + 0x224c86, + 0x313bc7, + 0x2f3b45, + 0x2327c4, + 0x3b6a46, + 0x20dbc4, + 0x2cd507, + 0x339848, + 0x727b1a48, + 0x3c7005, + 0x3e5247, + 0x2d6849, + 0x20e204, + 0x246d48, + 0x72b0a388, + 0x2d0044, + 0x300d08, + 0x3c3b44, + 0x3b7149, + 0x3a7485, + 0x72e3d942, + 0x216b45, + 0x2ea785, + 0x33b788, + 0x236b47, + 0x732008c2, + 0x3cc545, + 0x2eafc6, + 0x267b06, + 0x34e3c8, + 0x350c88, + 0x3832c6, + 0x383e86, + 0x30eb09, + 0x3e3346, + 0x224b4b, + 0x2ff3c5, + 0x3a6586, + 0x2ac848, + 0x3023c6, + 0x2a2346, + 0x21b00a, + 0x3a008a, + 0x25ed45, + 0x29b747, + 0x283486, + 0x736046c2, + 0x277287, + 0x3e4905, + 0x30d204, + 0x30d205, + 0x2db986, + 0x388947, + 0x220c85, + 0x228784, + 0x2c6188, + 0x2a2405, + 0x2f5247, + 0x336d05, + 0x381785, + 0x212784, + 0x346609, + 0x2c62c8, + 0x249d86, + 0x3aaac6, + 0x33c3c6, + 0x73b2d148, + 0x30b407, + 0x3915cd, + 0x366a0c, + 0x3e0089, + 0x3e9109, + 0x73f7d942, + 
0x3e8443, + 0x228183, + 0x2e9305, + 0x3b030a, + 0x344606, + 0x3ec8c5, + 0x323404, + 0x32340b, + 0x33910c, + 0x339a4c, + 0x339d55, + 0x33c74d, + 0x33eb8f, + 0x33ef52, + 0x33f3cf, + 0x33f792, + 0x33fc13, + 0x3400cd, + 0x34068d, + 0x340a0e, + 0x34130e, + 0x34198c, + 0x341d4c, + 0x34218b, + 0x342c0e, + 0x343512, + 0x3443cc, + 0x3448d0, + 0x351b12, + 0x35278c, + 0x352e4d, + 0x35318c, + 0x356491, + 0x35780d, + 0x359f8d, + 0x35a58a, + 0x35a80c, + 0x35bc0c, + 0x35c70c, + 0x35d18c, + 0x362213, + 0x362c10, + 0x363010, + 0x36368d, + 0x363c8c, + 0x367009, + 0x36914d, + 0x369493, + 0x36ba11, + 0x36c213, + 0x36d54f, + 0x36d90c, + 0x36dc0f, + 0x36dfcd, + 0x36e5cf, + 0x36e990, + 0x36f40e, + 0x37564e, + 0x375f90, + 0x376acd, + 0x37744e, + 0x3777cc, + 0x378693, + 0x37abce, + 0x37b710, + 0x37bb11, + 0x37bf4f, + 0x37c313, + 0x37d4cd, + 0x37d80f, + 0x37dbce, + 0x37e150, + 0x37e549, + 0x37f9d0, + 0x37fecf, + 0x38054f, + 0x380912, + 0x38248e, + 0x382f4d, + 0x3835cd, + 0x38390d, + 0x38468d, + 0x3849cd, + 0x384d10, + 0x38510b, + 0x385b0c, + 0x385e8c, + 0x38648c, + 0x38678e, + 0x3947d0, + 0x396512, + 0x39698b, + 0x396dce, + 0x39714e, + 0x39804e, + 0x3985cb, + 0x74398956, + 0x39988d, + 0x39a154, + 0x39ae4d, + 0x39d095, + 0x39f28d, + 0x39fc0f, + 0x3a040f, + 0x3a3acf, + 0x3a3e8e, + 0x3a420d, + 0x3a5751, + 0x3a8ecc, + 0x3a91cc, + 0x3a94cb, + 0x3a978c, + 0x3a9fcf, + 0x3aa392, + 0x3ab18d, + 0x3ac3cc, + 0x3acccc, + 0x3acfcd, + 0x3ad30f, + 0x3ad6ce, + 0x3affcc, + 0x3b058d, + 0x3b08cb, + 0x3b154c, + 0x3b268d, + 0x3b29ce, + 0x3b2d49, + 0x3b3d13, + 0x3b44cd, + 0x3b4bcd, + 0x3b51cc, + 0x3b588e, + 0x3b7c4f, + 0x3b800c, + 0x3b830d, + 0x3b864f, + 0x3b8a0c, + 0x3b900c, + 0x3b9d4c, + 0x3ba04c, + 0x3bac4d, + 0x3baf92, + 0x3bba0c, + 0x3bbd0c, + 0x3bc011, + 0x3bc44f, + 0x3bc80f, + 0x3bcbd3, + 0x3bf84e, + 0x3bfbcf, + 0x3bff8c, + 0x747c064e, + 0x3c09cf, + 0x3c0d96, + 0x3c44d2, + 0x3c7a8c, + 0x3c818f, + 0x3c880d, + 0x3df10f, + 0x3df4cc, + 0x3df7cd, + 0x3dfb0d, + 0x3e168e, + 0x3e2b8c, + 0x3e5b4c, + 0x3e5e50, + 0x3e77d1, + 0x3e7c0b, + 0x3e804c, + 0x3e834e, + 0x3e9651, + 0x3e9a8e, + 0x3e9e0d, + 0x3efb4b, + 0x3f0c4f, + 0x3f1894, + 0x2062c2, + 0x2062c2, + 0x204383, + 0x2062c2, + 0x204383, + 0x2062c2, + 0x203cc2, + 0x24e1c5, + 0x3e934c, + 0x2062c2, + 0x2062c2, + 0x203cc2, + 0x2062c2, + 0x2a43c5, + 0x2aad45, + 0x2062c2, + 0x2062c2, + 0x20b782, + 0x2a43c5, + 0x33cd09, + 0x36b70c, + 0x2062c2, + 0x2062c2, + 0x2062c2, + 0x2062c2, + 0x24e1c5, + 0x2062c2, + 0x2062c2, + 0x2062c2, + 0x2062c2, + 0x20b782, + 0x33cd09, + 0x2062c2, + 0x2062c2, + 0x2062c2, + 0x2aad45, + 0x2062c2, + 0x2aad45, + 0x36b70c, + 0x3e934c, + 0x250b03, + 0x206643, + 0x21f603, + 0x205503, + 0x2503c4, + 0x2109c3, + 0x21f143, + 0x29cf, + 0x13b548, + 0x74dc4, + 0xe7008, + 0x2000c2, + 0x75602202, + 0x2457c3, + 0x2f1684, + 0x203b43, + 0x205504, + 0x231386, + 0x244843, + 0x244804, + 0x29ebc5, + 0x206543, + 0x2109c3, + 0x21f143, + 0x3b6f0a, + 0x3a6386, + 0x3974cc, + 0x1b9688, + 0x202202, + 0x206643, + 0x21f603, + 0x205503, + 0x23ddc3, + 0x2ee6c6, + 0x2109c3, + 0x21f143, + 0x219683, + 0x1e143, + 0xb7188, + 0x761ed1c5, + 0x7e607, + 0x50085, + 0x17947, + 0x14cb05, + 0xa2a04, + 0xa2a0a, + 0x2d89, + 0x1ac2, + 0x1c928a, + 0x76fdd7c5, + 0x14cb05, + 0x342c7, + 0x6208, + 0x990e, + 0x97852, + 0x13608b, + 0x11b586, + 0x772d45c5, + 0x776d45cc, + 0x1e4a87, + 0xf08c7, + 0xdc24a, + 0x3f150, + 0x14dc45, + 0xba94b, + 0xc9a08, + 0x38107, + 0x12bccb, + 0xa24c9, + 0x4e387, + 0x1d9d47, + 0x1ccbc7, + 0x38046, + 0x7908, + 0x77c34106, + 0x5a307, + 0x28b86, + 0xbc84d, + 0xdbc10, + 0x78004c02, + 0x1d9bc8, + 
0x193090, + 0x1937cc, + 0x787a4a0d, + 0x6b4c8, + 0x6bdcb, + 0x7b6c7, + 0x1030c9, + 0x63646, + 0xa3748, + 0x17382, + 0x6808a, + 0x3eec7, + 0xcc107, + 0xb78c9, + 0xbac08, + 0x1c9f45, + 0x6ba47, + 0x116a86, + 0x173cc6, + 0x1097ce, + 0x48bce, + 0x5e44f, + 0x80789, + 0x1e0b49, + 0xaf78b, + 0xde04f, + 0x19cd8c, + 0xd844b, + 0x129148, + 0x1978c7, + 0x1a51c8, + 0xc1b0b, + 0xc268c, + 0xc2a8c, + 0xc2e8c, + 0xc318d, + 0x1ba688, + 0x7e5c2, + 0x19a989, + 0xa8ac8, + 0xde94b, + 0xe2006, + 0xea8cb, + 0x14304b, + 0xf328a, + 0xf4245, + 0xf91d0, + 0x100986, + 0x1bf006, + 0x1cf805, + 0xd9807, + 0x101048, + 0x104ac7, + 0x104d87, + 0x172e07, + 0x20286, + 0x16cd8a, + 0xb4c0a, + 0x15d46, + 0xcbecd, + 0x5a3c8, + 0x11d908, + 0x126c9, + 0x86009, + 0xdd585, + 0x167fcc, + 0xc338b, + 0x1f009, + 0x118984, + 0x119c09, + 0x119e46, + 0x13206, + 0x2642, + 0x54146, + 0x8618b, + 0x1260c7, + 0x126287, + 0x3642, + 0xe3dc5, + 0x6384, 0x101, - 0x66fc3, - 0x76644746, - 0xaba43, + 0x5c183, + 0x77b60fc6, + 0xd1fc3, 0x382, 0xe44, 0xb02, - 0x65a04, + 0x14f04, 0x882, - 0xc502, - 0x2742, - 0x1da82, - 0x3d82, - 0x27e82, - 0x45c2, - 0x16e3c2, - 0x3cc82, + 0x8b82, + 0x8a42, + 0x69782, + 0x1782, + 0x21942, 0x3402, - 0x101c2, - 0x64842, - 0x26783, + 0x1547c2, + 0x31982, + 0x54302, + 0x23c2, + 0x56442, + 0x1f603, 0x942, - 0x1b42, - 0x1b5c2, - 0x5b02, + 0x1342, + 0xfd02, + 0x8102, 0x642, - 0x30fc2, - 0x7142, - 0x2502, - 0x13f82, + 0x29602, + 0x15fc2, + 0x1442, + 0x4142, 0x5c2, - 0xbc83, - 0x40c2, - 0x1ac2, - 0x5cf02, - 0xaf02, - 0x12a82, - 0xfe02, - 0x24902, - 0x5942, + 0x11e43, + 0x2b82, + 0x4b02, + 0x510c2, + 0x6982, + 0x6b42, + 0x9582, + 0x1a202, + 0x2042, 0xec2, - 0x19b302, - 0x82182, - 0x736c2, - 0x14883, + 0x194cc2, + 0x7d202, + 0x70c2, + 0x109c3, 0x602, - 0x18882, - 0x25c2, - 0x7982, - 0x1b645, - 0x8782, - 0x21e42, - 0x1d4883, + 0x12282, + 0x1f42, + 0x16282, + 0x22b85, + 0x4f82, + 0x1a002, + 0x1dea83, 0x682, - 0x3b02, + 0x10002, 0x1042, - 0x1a82, - 0x14942, + 0x1a42, + 0x10a82, 0x8c2, - 0xba42, - 0x3342, - 0x5985, - 0x77614f82, - 0x77b0ed43, - 0x4d03, - 0x77e14f82, - 0x4d03, - 0xe7807, - 0x205683, + 0x5fc2, + 0x2642, + 0x2c45, + 0x78a03cc2, + 0x78f09343, + 0x15c43, + 0x79203cc2, + 0x15c43, + 0xe18c7, + 0x20d5c3, 0x2000c2, - 0x21a043, - 0x226783, - 0x204703, + 0x206643, + 0x21f603, + 0x3d6403, 0x2005c3, - 0x2384c3, - 0x214883, - 0x208c43, - 0x24acc3, - 0x319283, - 0x1ad803, - 0x1ad804, - 0x180286, - 0xd4944, - 0x179d45, - 0x10e445, - 0x18b43, - 0x1c3448, - 0x21a043, - 0x226783, - 0x204703, - 0x214e43, - 0x214883, - 0x208c43, - 0x24acc3, - 0x21a043, - 0x226783, - 0x24acc3, - 0x21a043, - 0x226783, - 0x20f583, + 0x23ddc3, + 0x2109c3, + 0x21d783, + 0x21f143, + 0x2d2003, + 0x1a5643, + 0x1a5644, + 0x1797c6, + 0xdd5c4, + 0x100505, + 0x10cf85, + 0x1c36c3, + 0x1b9688, + 0x206643, + 0x21f603, + 0x3d6403, + 0x206543, + 0x2109c3, + 0x21d783, + 0x21f143, + 0x206643, + 0x21f603, + 0x21f143, + 0x206643, + 0x21f603, + 0x205503, 0x200181, - 0x214e43, - 0x214883, - 0x262243, - 0x24acc3, - 0x5b84, - 0x20bec3, - 0x21a043, - 0x226783, - 0x219ec3, - 0x204703, - 0x262283, - 0x245143, - 0x2bc303, - 0x220783, - 0x20f583, - 0x25c204, - 0x214883, - 0x24acc3, + 0x206543, + 0x2109c3, + 0x22b643, + 0x21f143, + 0x1df004, + 0x250b03, + 0x206643, + 0x21f603, + 0x2064c3, + 0x3d6403, + 0x290a43, + 0x234503, + 0x2b4843, + 0x24c8c3, + 0x205503, + 0x2503c4, + 0x2109c3, + 0x21f143, 0x200f83, - 0x3496c4, - 0x235843, - 0x5d83, - 0x2548c3, - 0x337908, - 0x2a1784, + 0x3c7f44, + 0x211103, + 0x28c3, + 0x24a643, + 0x333648, + 0x358c84, 0x20020a, - 0x26b146, - 0xdcd44, - 0x3beb87, - 
0x22a0ca, - 0x216849, - 0x3d0147, - 0x3d5a0a, - 0x20bec3, - 0x21e6cb, - 0x37ff09, - 0x3a8445, - 0x3c30c7, - 0xb642, - 0x21a043, - 0x321187, - 0x351105, - 0x2e25c9, - 0x2370e, - 0x226783, - 0x246b86, - 0x322c83, - 0xcf003, - 0x122446, - 0x3946, - 0x13807, - 0x215186, - 0x21f445, - 0x20e007, - 0x31ef87, - 0x7aa0f583, - 0x3566c7, - 0x28af43, - 0xc1089, + 0x25efc6, + 0xd6644, + 0x3b4187, + 0x22370a, + 0x2169c9, + 0x3c9107, + 0x3ce48a, + 0x250b03, + 0x2afc8b, + 0x379449, + 0x386fc5, + 0x3b9307, + 0x2202, + 0x206643, + 0x26a5c7, + 0x30fec5, + 0x2db609, + 0x1b98e, + 0x21f603, + 0x23bfc6, + 0x2d96c3, + 0xdd703, + 0x122506, + 0x1dd686, + 0x1f4147, + 0x2142c6, + 0x21aa05, + 0x2074c7, + 0x2ef9c7, + 0x7be05503, + 0x3529c7, + 0x285fc3, + 0xba349, + 0x301f85, + 0x2503c4, + 0x2c7bc8, + 0x3e868c, + 0x2c6ec5, + 0x2b4446, + 0x30fd87, + 0x21f307, + 0x289187, + 0x289c08, + 0x32118f, + 0x3bec05, + 0x205247, + 0x2d4d07, + 0x3d848a, + 0x32aa49, + 0x322645, + 0x33390a, + 0x1270c6, + 0xcdd87, + 0x2d9745, + 0x3913c4, + 0x345786, + 0x1820c6, + 0x393a47, + 0x2dff87, + 0x33ad48, + 0x20f845, + 0x26a4c6, + 0x20508, + 0x27d9c5, + 0x26446, + 0x271585, + 0x243004, + 0x240fc7, + 0x24ca0a, + 0x2add88, + 0x205e46, + 0x3ddc3, + 0x2dfa85, + 0x2b0006, + 0x3c5e06, + 0x201e46, + 0x206543, + 0x3ab407, + 0xf9d4c, + 0x2d4c85, + 0x2109c3, + 0x2f354d, + 0x21d783, + 0x33ae48, + 0x27fc04, + 0x2b0305, + 0x2b5bc6, + 0x3d4b46, + 0x3a6487, + 0x254247, + 0x27e945, + 0x21f143, + 0x36ac87, + 0x3a2e09, + 0x323c89, + 0x38934a, + 0x207202, + 0x301f44, + 0x324444, + 0x303f07, + 0x3040c8, + 0x305b49, + 0x3cae89, + 0x306787, + 0x111609, + 0x20bc06, + 0x109546, + 0x30a904, + 0x22e98a, + 0x30cbc8, + 0x30da49, + 0x30de86, + 0x2cb145, + 0x2adc48, + 0x2e228a, + 0x27b403, + 0x335846, + 0x306887, + 0x35eb45, + 0x82b48, + 0x3bd345, + 0x2130c3, + 0x21bc04, + 0x4d509, + 0x2ba445, + 0x2930c7, + 0x2c6405, + 0x2f0a46, + 0x1062c5, + 0x203c83, + 0x203c89, + 0x2b00cc, + 0x2d720c, + 0x3142c8, + 0x2a3bc7, + 0x3156c8, + 0x115d07, + 0x31688a, + 0x316e8b, + 0x379588, + 0x3d4c48, + 0x238746, + 0x3e1985, + 0x34710a, + 0x2298c5, + 0x23d942, + 0x2e0f47, + 0x28e506, + 0x37f205, + 0x318d09, + 0x3be785, + 0x1d7b08, + 0x2bc385, + 0x301b49, + 0x32bac6, + 0x3d3a08, + 0x2b03c3, + 0x20b586, + 0x244886, 0x3256c5, - 0x25c204, - 0x2cde88, - 0x3e630c, - 0x2cd005, - 0x2bbf06, - 0x321047, - 0x3c7ec7, - 0x28fb87, - 0x297fc8, - 0x3217cf, - 0x349145, - 0x208a47, - 0x217d47, - 0x3a148a, - 0x315289, - 0x32bd05, - 0x32700a, - 0x10de86, - 0xd4d07, - 0x2e1485, - 0x398204, - 0x347b06, - 0x1974c6, - 0x260947, - 0x2e68c7, - 0x348748, - 0x213045, - 0x351006, - 0x2f908, - 0x363b05, - 0x2c406, - 0x2c0a45, - 0x243344, - 0x297e47, - 0x25668a, - 0x2b7808, - 0x3d2c86, - 0x384c3, - 0x2fc3c5, - 0x21ea46, - 0x34cac6, - 0x2024c6, - 0x214e43, - 0x3b4087, - 0x10210c, - 0x217cc5, - 0x214883, - 0x2fa0cd, - 0x208c43, - 0x348848, - 0x284784, - 0x21ed45, - 0x2bcf46, - 0x203286, - 0x3ae607, - 0x25fa87, - 0x283705, - 0x24acc3, - 0x3b8fc7, - 0x3bf949, - 0x323ec9, - 0x39054a, - 0x204d82, - 0x325684, - 0x337d84, - 0x3059c7, - 0x305b88, - 0x307609, - 0x379909, - 0x3082c7, - 0x113e89, - 0x2630c6, - 0x10a786, - 0x30b844, - 0x238fca, - 0x30db08, - 0x30ef49, - 0x30f386, - 0x2d1e45, - 0x2b76c8, - 0x2e81ca, - 0x236d03, - 0x349846, - 0x3083c7, - 0x237e45, - 0x891c8, - 0x3c7705, - 0x219183, - 0x223984, - 0x56cc9, - 0x2c1185, - 0x29a887, - 0x3cdec5, - 0x2ed1c6, - 0x107d85, - 0x232203, - 0x232209, - 0x21eb0c, - 0x2d954c, - 0x316208, - 0x2a8fc7, - 0x3f0d88, - 0x118c07, - 0x318f8a, - 0x3195cb, - 0x380048, - 0x203388, - 0x2681c6, - 
0x2614c5, - 0x34340a, - 0x2184c5, - 0x249902, - 0x2e66c7, - 0x27b8c6, - 0x385b45, - 0x398389, - 0x233885, - 0x1b76c8, - 0x2b44c5, - 0x325289, - 0x32d706, - 0x3eb748, - 0x324583, - 0x211fc6, - 0x390ec6, - 0x326e05, - 0x326e09, - 0x28aa89, - 0x261247, - 0x12ba84, - 0x32ba87, - 0x379809, - 0x22a2c5, - 0x469c8, - 0x3d4705, - 0x205585, - 0x3706c9, - 0x2029c2, - 0x235644, - 0x203d42, - 0x2040c2, - 0x2edf05, - 0x37e948, - 0x2d4845, - 0x2e3dc3, - 0x2e3dc5, - 0x2f1dc3, - 0x211282, - 0x2342c4, - 0x206743, - 0x207002, - 0x2fd284, - 0x314903, - 0x211b42, - 0x289ec3, - 0x21b484, - 0x30f503, - 0x269644, - 0x202fc2, - 0x2213c3, - 0x219883, - 0x2075c2, - 0x2a7382, - 0x28a8c9, - 0x20dfc2, - 0x29f2c4, - 0x206782, - 0x288904, - 0x263084, - 0x30c944, - 0x203342, - 0x24a8c2, - 0x239b03, - 0x2ae143, - 0x29aa04, - 0x2ec444, - 0x329104, - 0x32bc04, - 0x326803, - 0x2f7cc3, - 0x30de04, - 0x32cf44, - 0x32d086, - 0x205402, - 0xb642, - 0x4f583, - 0x20b642, - 0x226783, - 0x20f583, - 0x214883, - 0x24acc3, - 0x177c5, + 0x3256c9, + 0x283f09, + 0x2699c7, + 0x129804, + 0x329807, + 0x3cad89, + 0x223905, + 0x3be08, + 0x3c2b85, + 0x3d5685, + 0x3dbc09, + 0x206182, + 0x30f5c4, + 0x201602, + 0x202b82, + 0x2ecd85, + 0x3258c8, + 0x2cdb45, + 0x2dd4c3, + 0x2dd4c5, + 0x2ec943, + 0x20a702, + 0x22b384, + 0x203283, + 0x206702, + 0x3017c4, + 0x31dac3, + 0x20bc02, + 0x254b83, + 0x215cc4, + 0x30e003, + 0x259444, + 0x204842, + 0x219583, + 0x211c83, + 0x203c82, + 0x2b26c2, + 0x283d49, + 0x207482, + 0x298684, + 0x202002, + 0x261644, + 0x20bbc4, + 0x30ba04, + 0x202642, + 0x238382, + 0x22ddc3, + 0x31f043, + 0x2cd7c4, + 0x306a04, + 0x329984, + 0x333804, + 0x325043, + 0x3705c3, + 0x327044, + 0x32b344, + 0x32b486, + 0x3d5502, + 0x2202, + 0x47183, + 0x202202, + 0x21f603, + 0x205503, + 0x2109c3, + 0x21f143, + 0x36c5, 0x2000c2, - 0x20bec3, - 0x21a043, - 0x226783, - 0x201b43, - 0x20f583, - 0x25c204, - 0x28ab84, - 0x225a04, - 0x214883, - 0x24acc3, - 0x205443, - 0x30be44, - 0x3346c3, - 0x251943, - 0x389b04, - 0x3d4506, - 0x204243, - 0x14f985, - 0xf5d87, - 0x288583, - 0x7c62d088, - 0x265883, - 0x2cfcc3, - 0x21e803, - 0x2384c3, - 0x3b8305, - 0x1c3083, - 0x21a043, - 0x226783, - 0x20f583, - 0x214883, - 0x24acc3, - 0x2113c3, - 0x206083, - 0x1c3448, - 0x21a043, - 0x226783, - 0x20f583, - 0x20bc83, - 0x214883, - 0x26c584, - 0x24acc3, - 0x2d8484, - 0x14f985, - 0x36dd05, - 0xf5d87, - 0x20b642, - 0x203142, + 0x250b03, + 0x206643, + 0x21f603, + 0x204d03, + 0x205503, + 0x2503c4, + 0x284004, + 0x294744, + 0x2109c3, + 0x21f143, + 0x219683, + 0x30af04, + 0x330b03, + 0x2375c3, + 0x383204, + 0x3c2986, + 0x207843, + 0x14cb05, + 0xf08c7, + 0x21dac3, + 0x7da24f08, + 0x2534c3, + 0x2c90c3, + 0x2721c3, + 0x23ddc3, + 0x3cc445, + 0x1b92c3, + 0x206643, + 0x21f603, + 0x205503, + 0x2109c3, + 0x21f143, + 0x26a483, + 0x202bc3, + 0x1b9688, + 0x206643, + 0x21f603, + 0x205503, + 0x211e43, + 0x2109c3, + 0x262784, + 0x21f143, + 0x2b2b44, + 0x14cb05, + 0x31af85, + 0xf08c7, + 0x202202, + 0x201482, 0x200382, - 0x201f42, + 0x2018c2, 0x2003c2, - 0x2d84, - 0x21a043, - 0x243544, - 0x226783, - 0x20f583, - 0x214e43, - 0x214883, - 0x24acc3, - 0x1c3448, - 0x21a043, - 0x226783, - 0x20f583, - 0x214e43, - 0x225a04, - 0x214883, - 0x24acc3, - 0x219303, - 0x265a04, - 0x1c3448, - 0x21a043, - 0x208c43, - 0x18b43, - 0x12e504, - 0x2346c4, - 0x1c3448, - 0xb642, - 0x21a043, - 0x2607c4, - 0x25c204, - 0x208c43, - 0x201802, - 0x24acc3, - 0x2423c3, - 0x23984, - 0x33a505, - 0x249902, - 0x333483, - 0x1beec9, - 0xf67c6, - 0x8c108, + 0x1d4644, + 0x206643, + 0x2392c4, + 0x21f603, + 0x205503, + 0x206543, + 0x2109c3, + 
0x21f143, + 0x1b9688, + 0x206643, + 0x21f603, + 0x205503, + 0x206543, + 0x294744, + 0x2109c3, + 0x21f143, + 0x213dc3, + 0x214f04, + 0x1b9688, + 0x206643, + 0x21d783, + 0x1c36c3, + 0x12d9c4, + 0x204884, + 0x14cb05, + 0x1b9688, + 0x2202, + 0x206643, + 0x2554c4, + 0x2503c4, + 0x21d783, + 0x201582, + 0x21f143, + 0x237343, + 0x1bc04, + 0x33d845, + 0x23d942, + 0x3de983, + 0x1a2389, + 0xf1506, + 0x84b08, 0x2000c2, - 0x1c3448, - 0x7fafe207, - 0x20b642, - 0x226783, - 0x20f583, + 0x1b9688, + 0x80fbed87, + 0x202202, + 0x21f603, + 0x205503, 0x2005c2, - 0x24acc3, - 0x54c2, + 0x21f143, + 0x8f42, 0x82, - 0x23984, + 0x1bc04, 0xc2, - 0x1d5bc7, - 0x17149, - 0x5c83, - 0x1c3448, - 0x1da43, - 0x80330047, - 0x1a043, - 0xa8408, - 0x26783, - 0x85587, - 0xf583, - 0x1eeb86, - 0xbc83, - 0xb5fc8, - 0xdf908, - 0x1d8b03, - 0x8f606, - 0x8053b0c5, - 0x1b8b85, - 0x14e43, - 0xa2388, - 0xea7c8, - 0x69243, - 0x808fb886, - 0x102dc5, - 0x1bf644, - 0x412c7, - 0x14883, - 0x7803, - 0x4acc3, - 0x4d42, - 0x19720a, - 0x9c83, - 0x80edc7cc, - 0xedfc3, - 0x1b04, - 0x1215cb, - 0x121b88, - 0xa4fc2, - 0x123d83, - 0x1416c47, - 0x15d4b87, - 0x14e3e88, - 0x1523d83, - 0x1cd6c8, - 0x149a144, - 0x1eafcb, - 0x13c02, - 0x13b587, - 0x1547c4, + 0x1ce647, + 0x172c9, + 0x27c3, + 0x1b9688, + 0x69743, + 0x8172e887, + 0x6643, + 0xa0588, + 0x1f603, + 0x58287, + 0x5503, + 0x3fa46, + 0x11e43, + 0x42308, + 0xd89c8, + 0x1d1483, + 0x87f86, + 0x81937585, + 0x1db6c5, + 0x6543, + 0x9b108, + 0xe5dc8, + 0x5e943, + 0x81cf4f06, + 0xfa885, + 0x1a2b04, + 0x36287, + 0x109c3, + 0x4643, + 0x1f143, + 0x15c82, + 0x18fd0a, + 0x20e43, + 0x8220ff4c, + 0xcdc03, + 0x150c4, + 0x120f8b, + 0x121548, + 0x9cfc2, + 0x123b43, + 0x1410087, + 0x153b287, + 0x151a888, + 0x1523b43, + 0x1c7448, + 0x14e9e04, + 0xfd6cb, + 0xd842, + 0x137a47, + 0x14e6c4, + 0xf0c87, 0x2000c2, - 0x20b642, - 0x243544, - 0x20f583, - 0x214e43, - 0x214883, - 0x24acc3, - 0x21a043, - 0x226783, - 0x20f583, - 0x2384c3, - 0x214883, - 0x24acc3, - 0x229a83, - 0x219303, - 0x15ac3, - 0x21a043, - 0x226783, - 0x20f583, - 0x214883, - 0x24acc3, - 0x21a043, - 0x226783, - 0x20f583, - 0x214883, - 0x24acc3, - 0x18b43, - 0x26783, - 0x8300f583, - 0x833c7, - 0x21a043, - 0x226783, - 0x20f583, - 0x25c204, - 0x2384c3, - 0x214883, - 0x24acc3, - 0x22ccc2, + 0x202202, + 0x2392c4, + 0x205503, + 0x206543, + 0x2109c3, + 0x21f143, + 0x206643, + 0x21f603, + 0x205503, + 0x23ddc3, + 0x2109c3, + 0x21f143, + 0x220283, + 0x213dc3, + 0x1e143, + 0x206643, + 0x21f603, + 0x205503, + 0x2109c3, + 0x21f143, + 0x206643, + 0x21f603, + 0x205503, + 0x2109c3, + 0x21f143, + 0x1c36c3, + 0x1f603, + 0x84405503, + 0x7e607, + 0x206643, + 0x21f603, + 0x205503, + 0x2503c4, + 0x23ddc3, + 0x2109c3, + 0x21f143, + 0x224b42, 0x2000c1, 0x2000c2, 0x200201, - 0x340a82, - 0x1c3448, - 0x226f45, + 0x33ec82, + 0x1b9688, + 0x220a05, 0x200101, - 0x1a043, - 0x3e084, + 0x6643, + 0x32184, 0x200cc1, 0x200501, 0x200bc1, - 0x2590c2, - 0x396b44, - 0x2590c3, + 0x24e142, + 0x38f644, + 0x24e143, 0x200041, 0x200801, 0x200181, - 0x37d06, - 0x1e144c, + 0x2dac6, + 0x1e3c4c, 0x200701, - 0x324e47, - 0x306d0f, - 0x3ccd06, + 0x368ec7, + 0x30524f, + 0x3e53c6, 0x2004c1, - 0x32e546, + 0x32da06, 0x200ec1, - 0x10210c, + 0xf9d4c, 0x200581, - 0x3c2a4e, + 0x3b8c8e, 0x2003c1, - 0x24acc3, - 0x2015c1, - 0x83ed2ac4, - 0x23ba45, - 0x204d42, - 0x219085, + 0x21f143, + 0x201401, + 0x852cbdc4, + 0x241c05, + 0x215c82, + 0x212fc5, 0x200401, 0x200741, 0x2007c1, - 0x249902, + 0x23d942, 0x200081, 0x200f81, - 0x202d81, - 0x203481, - 0x2033c1, - 0x2802, - 0x67409, - 0x1c3448, - 0x21a043, - 0x226783, - 0x1008c8, - 0x20f583, - 
0x214883, - 0x24acc3, - 0x221c03, - 0x1f3503, - 0x21a043, - 0x20f583, - 0xa4f08, - 0x214e43, - 0x214883, - 0x2a783, - 0x24acc3, - 0x84f34308, - 0x1f31c3, - 0x112c8, - 0x13c82, - 0x17c3, - 0xa842, - 0x3342, - 0x14f985, - 0x1c3448, - 0xa4806, - 0x1ec847, - 0x14f985, - 0xb284, - 0x19a3c8, - 0x550c4, - 0x12c887, - 0x6af04, - 0x5c28c, - 0x1e6304, - 0x2bd85, - 0x67409, - 0x1f0487, - 0xe1548, - 0x2aac6, - 0x12e0a, - 0x157714a, - 0x127984, - 0x15886c3, - 0x1c3448, - 0x21a043, - 0x226783, - 0x20f583, - 0x214883, - 0x24acc3, - 0x205d83, - 0x1c3448, - 0x21a043, - 0x226783, - 0x2eef84, - 0x24acc3, - 0x2cf905, - 0x26b544, - 0x21a043, - 0x226783, - 0x20f583, + 0x208f81, + 0x205381, + 0x201841, + 0x5682, + 0x5c4c9, + 0x1b9688, + 0x206643, + 0x21f603, + 0x58a88, + 0x205503, + 0x2109c3, + 0x21f143, + 0x219dc3, + 0x1f2c83, + 0x206643, + 0x205503, + 0x9cf08, + 0x206543, + 0x2109c3, + 0x3d43, + 0x21f143, + 0x863ddc88, + 0x1f2943, + 0x1d7808, + 0x886c2, + 0x29c3, + 0x4c02, + 0x2642, + 0x14cb05, + 0x1b9688, + 0x9da06, + 0x13b947, + 0x14cb05, + 0xfc204, + 0x1593348, + 0x4afc4, + 0x12ae87, + 0x614c4, + 0x5044c, + 0x1e8684, + 0x66545, + 0x5c4c9, + 0x1a7f47, + 0xd0b08, + 0x2b6c6, + 0x1c208a, + 0x15e71ca, + 0x130e44, + 0x1582cc3, + 0x1b9688, + 0x206643, + 0x21f603, + 0x205503, + 0x2109c3, + 0x21f143, + 0x2028c3, + 0x1b9688, + 0x206643, + 0x21f603, + 0x2e4084, + 0x21f143, + 0x29f085, + 0x277a04, + 0x206643, + 0x21f603, + 0x205503, 0x200ec2, - 0x214883, - 0x24acc3, - 0x19303, - 0xf9b06, - 0xc8204, - 0x12fb86, - 0x20bec3, - 0x21a043, - 0x226783, - 0x20f583, + 0x2109c3, + 0x21f143, + 0x13dc3, + 0x1568c05, + 0xf2f86, + 0xc1504, + 0x12d506, + 0x250b03, + 0x206643, + 0x21f603, + 0x205503, 0x200ec2, - 0x214883, - 0x24acc3, - 0x219303, - 0x20b642, - 0x21a043, - 0x23e609, - 0x226783, - 0x2c22c9, - 0x214e43, - 0x214883, - 0x90544, - 0x24acc3, - 0x30b648, - 0x245ac7, - 0x33a505, - 0x62386, - 0xcc388, - 0x13a509, - 0x1e5ac8, - 0x1d5bc7, - 0x105e0a, - 0x1a9fcb, - 0x12e787, - 0x4d688, - 0x1e244a, - 0xcea48, - 0x17149, - 0x302c7, - 0x1bb647, - 0x1ec048, - 0xa8408, - 0x4ee4f, - 0xb79c5, - 0xa8707, - 0x1eeb86, - 0x1afc47, - 0x12be46, - 0xb5fc8, - 0xc28c6, - 0x1eb547, - 0x135dc9, - 0x36c87, - 0x1b3209, - 0xd5949, - 0xdcac6, - 0xdf908, - 0xcc645, - 0xf808a, - 0xea7c8, - 0x69243, - 0xf2648, - 0x412c7, - 0x171045, - 0xa1050, - 0x7803, - 0x1eb3c7, - 0x16085, - 0x106b48, - 0x1ea2c5, - 0xedfc3, - 0x1688, - 0x52c6, - 0x164b89, - 0xc5407, - 0x1bf18b, - 0x82404, - 0x11ae04, - 0x1215cb, - 0x121b88, - 0x122347, - 0x14f985, - 0x21a043, - 0x226783, - 0x204703, - 0x24acc3, - 0x24c843, - 0x20f583, - 0x21a043, - 0x226783, - 0x20f583, - 0x214e43, - 0x214883, - 0x24acc3, - 0xabf8b, + 0x2109c3, + 0x21f143, + 0x213dc3, + 0x202202, + 0x206643, + 0x232709, + 0x21f603, + 0x2bb789, + 0x206543, + 0x2109c3, + 0x88a84, + 0x21f143, + 0x30a708, + 0x238c07, + 0x33d845, + 0xa0cc6, + 0x133f48, + 0x13d849, + 0x1e7e48, + 0x1ce647, + 0x10434a, + 0x76acb, + 0x12dc47, + 0x43e08, + 0x1e4c4a, + 0xc8748, + 0x172c9, + 0x28907, + 0x1b6307, + 0x1bda88, + 0xa0588, + 0x46a4f, + 0xadf45, + 0xa0887, + 0x3fa46, + 0x1ec787, + 0x122786, + 0x42308, + 0xfb006, + 0x1d3807, + 0x1458c9, + 0x1b79c7, + 0x1c7849, + 0xce6c9, + 0xd63c6, + 0xd89c8, + 0x134205, + 0x7628a, + 0xe5dc8, + 0x5e943, + 0xecbc8, + 0x36287, + 0xfe605, + 0x162810, + 0x4643, + 0x1af247, + 0x16205, + 0x105088, + 0xfff05, + 0xcdc03, + 0xfd948, + 0x1d53c6, + 0x5e249, + 0xbe787, + 0x1a264b, + 0x118384, + 0x119804, + 0x120f8b, + 0x121548, + 0x122407, + 0x14cb05, + 0x206643, + 0x21f603, + 0x3d6403, + 0x21f143, + 0x243143, + 
0x205503, + 0x206643, + 0x21f603, + 0x205503, + 0x206543, + 0x2109c3, + 0x21f143, + 0xaf8cb, 0x2000c2, - 0x20b642, - 0x24acc3, - 0x45c2, + 0x202202, + 0x21f143, + 0x3402, 0xec2, 0xf82, - 0x1c3448, - 0x13bf09, - 0x1cd6c8, - 0xb642, + 0x1b9688, + 0x132d09, + 0x1c7448, + 0x2202, 0x2000c2, - 0x20b642, + 0x202202, 0x200382, 0x2005c2, - 0x2024c2, - 0x214883, - 0x18846, + 0x201e42, + 0x2109c3, + 0x12246, 0x2003c2, - 0x23984, + 0x1bc04, 0x2000c2, - 0x20bec3, - 0x20b642, - 0x21a043, - 0x226783, + 0x250b03, + 0x202202, + 0x206643, + 0x21f603, 0x200382, - 0x20f583, - 0x20bc83, - 0x214e43, - 0x225a04, - 0x214883, - 0x21c243, - 0x24acc3, - 0x201b04, + 0x205503, + 0x211e43, + 0x206543, + 0x294744, + 0x2109c3, + 0x20f4c3, + 0x21f143, + 0x2150c4, 0x200f83, - 0x20f583, - 0x20b642, - 0x21a043, - 0x226783, - 0x20f583, - 0x214e43, - 0x214883, - 0x208c43, - 0x24acc3, - 0x3ceb47, - 0x21a043, - 0x242ec7, - 0x32b106, - 0x212e83, - 0x226dc3, - 0x20f583, - 0x211983, - 0x25c204, - 0x2580c4, - 0x34ae46, - 0x23bc83, - 0x214883, - 0x13404b, - 0x24acc3, - 0x2cf905, - 0x336d44, - 0x3db4c3, - 0x2543c3, - 0x2e66c7, - 0x23ae45, - 0x4f43, - 0x21a043, - 0x226783, - 0x20f583, - 0x214e43, - 0x214883, - 0x1c77c4, - 0x24acc3, - 0xbb43, - 0x8c314a8c, - 0xe28c3, - 0x68ec7, - 0x8b246, - 0x1d95c7, - 0x1369c5, - 0x214202, - 0x25e203, - 0x21c683, - 0x20bec3, - 0x8ce1a043, - 0x20c942, - 0x226783, - 0x207783, - 0x20f583, - 0x25c204, - 0x20de83, - 0x349143, - 0x214e43, - 0x225a04, - 0x8d203642, - 0x214883, - 0x24acc3, - 0x211b43, - 0x219d83, - 0x214903, - 0x22ccc2, + 0x205503, + 0x202202, + 0x206643, + 0x21f603, + 0x205503, + 0x206543, + 0x2109c3, + 0x21d783, + 0x21f143, + 0x206643, + 0x214183, + 0x20fd03, + 0x205503, + 0x2036c3, + 0x2503c4, + 0x296584, + 0x321b86, + 0x241f83, + 0x2109c3, + 0x1dd9cb, + 0x21f143, + 0x29f085, + 0x2e0f47, + 0x1d6c43, + 0x206643, + 0x21f603, + 0x205503, + 0x206543, + 0x2109c3, + 0x1bd404, + 0x21f143, + 0x19a43, + 0x8db1300c, + 0xdb903, + 0x71bc7, + 0x862c6, + 0x1db74c, + 0xd9807, + 0x1d3f85, + 0x210342, + 0x25a503, + 0x2d9c43, + 0x250b03, + 0x8ea06643, + 0x211ac2, + 0x21f603, + 0x203b43, + 0x205503, + 0x2503c4, + 0x33c683, + 0x3bec03, + 0x206543, + 0x294744, + 0x8ee024c2, + 0x2109c3, + 0x21f143, + 0x2330c3, + 0x206383, + 0x210a43, + 0x224b42, 0x200f83, - 0x1c3448, - 0x20f583, - 0x18b43, - 0x220c04, - 0x20bec3, - 0x20b642, - 0x21a043, - 0x243544, - 0x226783, - 0x20f583, - 0x25c204, - 0x20bc83, - 0x279504, - 0x230944, - 0x2f3c06, - 0x225a04, - 0x214883, - 0x24acc3, - 0x205443, - 0x27b8c6, - 0x474cb, - 0x38286, - 0x489ca, - 0x128d8a, - 0x1c3448, - 0x22f8c4, - 0x8e61a043, - 0x20be84, - 0x226783, - 0x2d1704, - 0x20f583, - 0x37ee83, - 0x214e43, - 0x214883, - 0x24acc3, - 0x23c83, - 0x35174b, - 0x3de04a, - 0x3f2ccc, - 0xfc148, + 0x1b9688, + 0x205503, + 0x1c36c3, + 0x22ffc4, + 0x250b03, + 0x202202, + 0x206643, + 0x2392c4, + 0x21f603, + 0x205503, + 0x2503c4, + 0x211e43, + 0x221e44, + 0x228f84, + 0x2ee6c6, + 0x294744, + 0x2109c3, + 0x21f143, + 0x219683, + 0x28e506, + 0x3c90b, + 0x34106, + 0x436ca, + 0x126b8a, + 0x1b9688, + 0x2204c4, + 0x90206643, + 0x34d284, + 0x21f603, + 0x212804, + 0x205503, + 0x325e03, + 0x206543, + 0x2109c3, + 0x21f143, + 0x1ccc3, + 0x34ec4b, + 0x3dfe4a, + 0x3f244c, + 0xf5848, 0x2000c2, - 0x20b642, + 0x202202, 0x200382, - 0x239805, - 0x25c204, + 0x22f2c5, + 0x2503c4, 0x200ec2, - 0x214e43, - 0x230944, - 0x201f42, + 0x206543, + 0x228f84, + 0x2018c2, 0x2003c2, - 0x206382, - 0x22ccc2, - 0xbec3, - 0x6282, - 0x2de849, - 0x27fc88, - 0x366989, - 0x3a7f49, - 0x23240a, - 0x31170a, - 0x202882, - 0x36e3c2, - 
0xb642, - 0x21a043, - 0x203682, - 0x24ed46, - 0x3347c2, - 0x4a282, + 0x202ec2, + 0x224b42, + 0x50b03, + 0x2dc2, + 0x2da309, + 0x27ad48, + 0x239b49, + 0x3a0c49, + 0x203e8a, + 0x21898a, + 0x205202, + 0x3547c2, + 0x2202, + 0x206643, + 0x22dd42, + 0x246946, + 0x330c02, + 0x41402, 0x200d82, - 0x3aa34e, - 0x22140e, - 0x214807, - 0x218342, - 0x226783, - 0x20f583, - 0x2380c2, + 0x276e4e, + 0x2195ce, + 0x210947, + 0x211742, + 0x21f603, + 0x205503, + 0x20c7c2, 0x2005c2, - 0x31a43, - 0x24374f, - 0x24f082, - 0x3cdac7, - 0x2ccdc7, - 0x3382c7, - 0x2d12cc, - 0x2dc1cc, - 0x2208c4, - 0x29588a, - 0x221342, - 0x20af02, - 0x2d7504, + 0xfc83, + 0x2394cf, + 0x246c82, + 0x2c6007, + 0x2cac07, + 0x32efc7, + 0x2ca4cc, + 0x2e010c, + 0x22fc84, + 0x28374a, + 0x219502, + 0x206982, + 0x2d0244, 0x200702, - 0x251ec2, - 0x2dc404, - 0x21f082, - 0x212a82, - 0x22043, - 0x2c2947, - 0x320f45, - 0x224902, - 0x31f984, - 0x39b302, - 0x2fba08, - 0x214883, - 0x385ec8, - 0x207602, - 0x220a85, - 0x3a5a06, - 0x24acc3, - 0x208782, - 0x307847, - 0x4d42, - 0x256145, - 0x37b505, + 0x237b42, + 0x2e0344, + 0x217702, + 0x206b42, + 0xf743, + 0x2fb087, + 0x2e4c85, + 0x21a202, + 0x31e584, + 0x394cc2, + 0x2f5088, + 0x2109c3, + 0x37f588, + 0x201f82, + 0x22fe45, + 0x39e446, + 0x21f143, + 0x204f82, + 0x305d87, + 0x15c82, + 0x24c4c5, + 0x34d785, 0x2010c2, - 0x20b242, - 0x3d074a, - 0x28358a, - 0x2827c2, - 0x2b6ac4, - 0x201b02, - 0x325548, - 0x203b42, - 0x22a888, + 0x23f602, + 0x29430a, + 0x27e7ca, + 0x27ccc2, + 0x2ad344, + 0x202a42, + 0x301e08, + 0x218182, + 0x3d2048, 0x1101, - 0x31c507, - 0x31d209, - 0x2561c2, - 0x3231c5, - 0x381a85, - 0x21310b, - 0x34b3cc, - 0x236f48, - 0x33b3c8, - 0x205402, - 0x2a5982, + 0x31b347, + 0x31c049, + 0x24c542, + 0x322f85, + 0x275c45, + 0x20f90b, + 0x32210c, + 0x22d408, + 0x337888, + 0x3d5502, + 0x29e002, 0x2000c2, - 0x1c3448, - 0x20b642, - 0x21a043, + 0x1b9688, + 0x202202, + 0x206643, 0x200382, - 0x201f42, + 0x2018c2, 0x2003c2, - 0x24acc3, - 0x206382, + 0x21f143, + 0x202ec2, 0x2000c2, - 0x14f985, - 0x8fa0b642, - 0x114bc4, - 0x44d85, - 0x90a0f583, - 0xc3fc4, - 0x222043, + 0x14cb05, + 0x91602202, + 0x113144, + 0x3ab05, + 0x92605503, + 0xbd504, + 0x20f743, 0x200ec2, - 0x214883, - 0x3c8a03, - 0x90e4acc3, - 0x305103, - 0x2e7906, - 0x198285, - 0x1619303, - 0x1bf3c5, - 0x14f985, - 0x1519cb, - 0x1c3448, - 0x8fe0dcc8, - 0x5aec7, - 0x903c488a, - 0x10210c, - 0x1c3247, - 0x1a4045, - 0x907ecf49, - 0x2facd, - 0x8a3c2, - 0x122182, + 0x2109c3, + 0x381cc3, + 0x92a1f143, + 0x303643, + 0x2e19c6, + 0x191445, + 0x1613dc3, + 0x1a2885, + 0x14cb05, + 0x14eecb, + 0x1b9688, + 0x91b3c4c8, + 0x671c7, + 0x91ed240a, + 0xf9d4c, + 0x1b9487, + 0x1cf805, + 0x92391889, + 0x206cd, + 0x3f842, + 0x121b42, 0xc01, - 0x103e84, - 0xbf58a, - 0x833c7, - 0x16dc4, - 0x16e03, - 0x16e04, - 0x91604042, - 0x91a00b02, - 0x91e01d42, - 0x922016c2, - 0x9260f202, - 0x92a03d82, - 0xf5d87, - 0x92e0b642, - 0x93218402, - 0x9362b602, - 0x93a101c2, - 0x221403, - 0x36604, - 0x93f008c8, - 0x239cc3, - 0x94219482, - 0x727c8, - 0x94603f82, - 0x871c7, - 0x1c1087, - 0x94a00042, - 0x94e04142, - 0x95200182, - 0x95606502, - 0x95a13f82, - 0x95e005c2, - 0x1a8385, - 0x21e503, - 0x3cc144, - 0x96200702, - 0x9661c4c2, - 0x96a03e02, - 0x9bc0b, - 0x96e03f02, - 0x976593c2, - 0x97a00ec2, - 0x97e024c2, - 0xa2388, - 0x9822ab02, - 0x98601942, - 0x98a0d3c2, - 0x98e82182, - 0x99203642, - 0x99609082, - 0x99a01f42, - 0x99e12b42, - 0x9a209882, - 0x9a60b482, - 0x149204, - 0x23d1c3, - 0x9aa0b342, - 0x9ae1a182, - 0x9b206a42, - 0x9b6006c2, - 0x9ba003c2, - 0x9be07002, - 0x104c48, - 0xac107, - 0x9c205442, - 
0x9c6026c2, - 0x9ca06382, - 0x9ce213c2, - 0x16018c, - 0x9d2021c2, - 0x9d62ebc2, - 0x9da07182, - 0x9de07882, - 0x9e20b382, - 0x9e609142, - 0x9ea0d442, - 0x9ee138c2, - 0x9f28c442, - 0x9f68cd82, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x24b03, - 0xcd883, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x9720de83, - 0x224b03, - 0x3b8384, - 0x366886, - 0x310783, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x378049, - 0x206282, - 0x370903, - 0x2d5c43, - 0x20cac5, - 0x207783, - 0x20de83, - 0x224b03, - 0x2f33c3, - 0x23fac3, - 0x227b89, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x20de83, - 0x224b03, - 0x206282, - 0x206282, - 0x20de83, - 0x224b03, - 0x9fe1a043, - 0x226783, - 0x3a8183, - 0x214e43, - 0x214883, - 0x24acc3, - 0x1c3448, - 0x20b642, - 0x21a043, - 0x214883, - 0x24acc3, - 0x14e5c2, - 0x21a043, - 0x226783, - 0x20f583, - 0xa0913002, - 0x214e43, - 0x214883, - 0x24acc3, + 0xfbc44, + 0xb7d8a, + 0x7e607, + 0x13550f, + 0x16f44, + 0x16f83, + 0x16f84, + 0x6a08b, + 0xa74d, + 0x9320bb02, + 0x93600b02, + 0x93a028c2, + 0x93e01942, + 0x94203602, + 0x94601782, + 0xf08c7, + 0x94a02202, + 0x94e11802, + 0x95223fc2, + 0x956023c2, + 0x2195c3, + 0x1b7344, + 0x95a58a88, + 0x22df83, + 0x95e13f42, + 0x6b4c8, + 0x96208782, + 0x55dc7, + 0x1b5407, + 0x96600042, + 0x96a03582, + 0x96e00182, + 0x97203042, + 0x97604142, + 0x97a005c2, + 0x186f05, + 0x2136c3, + 0x3bf3c4, + 0x97e00702, + 0x98239842, + 0x98604fc2, + 0x93e0b, + 0x98a05e42, + 0x9924e442, + 0x99600ec2, + 0x99a01e42, + 0x9b108, + 0x99e1cf02, + 0x9a201502, + 0x9a603c42, + 0x9aa7d202, + 0x9ae024c2, + 0x9b203702, + 0x9b6018c2, + 0x9ba1e342, + 0x9be0abc2, + 0x9c239e02, + 0x11ff04, + 0x3ccd03, + 0x9c636d82, + 0x9ca06782, + 0x9ce0aec2, + 0x9d2006c2, + 0x9d6003c2, + 0x9da06702, + 0x102ac8, + 0xafa47, + 0x9de02b02, + 0x9e202b42, + 0x9e602ec2, + 0x9ea06ec2, + 0x167fcc, + 0x9ee01b42, + 0x9f2272c2, + 0x9f616002, + 0x9fa046c2, + 0x9fe0c302, + 0xa0204002, + 0xa06059c2, + 0xa0a03dc2, + 0xa0e84e42, + 
0xa1285782, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x1a403, + 0xd58c3, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x98f3c683, + 0x21a403, + 0x3cc4c4, + 0x239a46, + 0x30e543, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x3de009, + 0x202dc2, + 0x3dbe43, + 0x2ce9c3, + 0x33b705, + 0x203b43, + 0x33c683, + 0x21a403, + 0x2eddc3, + 0x240b83, + 0x221649, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x33c683, + 0x21a403, + 0x202dc2, + 0x202dc2, + 0x33c683, + 0x21a403, + 0xa1a06643, + 0x21f603, + 0x3a0e83, + 0x206543, + 0x2109c3, + 0x21f143, + 0x1b9688, + 0x202202, + 0x206643, + 0x2109c3, + 0x21f143, + 0x14b742, + 0x206643, + 0x21f603, + 0x205503, + 0xa2500dc2, + 0x206543, + 0x2109c3, + 0x21f143, 0xcc1, - 0x2346c4, - 0x262603, - 0x20b642, - 0x21a043, - 0x3bfcc3, - 0x226783, - 0x2607c4, - 0x204703, - 0x20f583, - 0x25c204, - 0x20bc83, - 0x214e43, - 0x214883, - 0x208c43, - 0x24acc3, - 0x240f03, - 0x2423c3, - 0x33a505, - 0x23fac3, + 0x204884, + 0x266483, + 0x202202, + 0x206643, + 0x3a3183, + 0x21f603, + 0x2554c4, + 0x3d6403, + 0x205503, + 0x2503c4, + 0x211e43, + 0x206543, + 0x2109c3, + 0x21d783, + 0x21f143, + 0x235ec3, + 0x237343, + 0x33d845, + 0x240b83, 0x200f83, 0x882, - 0x20b642, - 0x21a043, - 0x20de83, - 0x214883, - 0x24acc3, + 0x202202, + 0x206643, + 0x33c683, + 0x2109c3, + 0x21f143, 0x2000c2, - 0x20bec3, - 0x1c3448, - 0x21a043, - 0x226783, - 0x20f583, - 0x23c686, - 0x25c204, - 0x20bc83, - 0x225a04, - 0x214883, - 0x24acc3, - 0x205443, - 0x11d44, - 0x16e3c2, - 0x21a043, - 0x24dc3, - 0x226783, + 0x250b03, + 0x1b9688, + 0x206643, + 0x21f603, + 0x205503, + 0x231386, + 0x2503c4, + 0x211e43, + 0x294744, + 0x2109c3, + 0x21f143, + 0x219683, + 0x6204, + 0x1547c2, + 0x206643, + 0x1a6c3, + 0x21f603, 0xec2, - 0x214883, - 0x24acc3, - 0x791c4, - 0x78884, - 0x19c2, - 0x1568287, - 0x152347, - 0x21a043, - 0x38286, - 0x226783, - 0x20f583, - 0xfda46, - 0x214883, - 0x24acc3, - 0x337788, - 0x33b209, - 0x34a749, - 0x354348, - 0x3a7d88, - 0x3a7d89, - 
0x33060a, - 0x369eca, - 0x3a280a, - 0x3a8f8a, - 0x3de04a, - 0x3ede8b, - 0x365a4d, - 0x24a2cf, - 0x24fe10, - 0x36e74d, - 0x38ca8c, - 0x3a8ccb, - 0x1a3447, - 0x13454e, - 0x138a0a, - 0x13af09, - 0x14a749, - 0x16c949, - 0x16cb8a, - 0x176f49, - 0x177889, - 0x17898b, - 0x19c08, - 0x112e48, - 0x59d09, - 0x149e747, - 0xe9545, - 0x21a043, - 0x226783, - 0x20f583, - 0x214e43, - 0x214883, - 0x24acc3, - 0x4acc3, + 0x2109c3, + 0x21f143, + 0x72e84, + 0x72544, + 0xc642, + 0x158a7c7, + 0x7247, + 0x206643, + 0x34106, + 0x21f603, + 0x205503, + 0xf7e06, + 0x2109c3, + 0x21f143, + 0x3334c8, + 0x3376c9, + 0x348109, + 0x350ac8, + 0x3a0a88, + 0x3a0a89, + 0x32e0ca, + 0x3649ca, + 0x39b18a, + 0x3a1a4a, + 0x3dfe4a, + 0x3ebacb, + 0x24144d, + 0x242a0f, + 0x246150, + 0x36894d, + 0x38618c, + 0x3a178b, + 0x1784c7, + 0x13098e, + 0x13530a, + 0x1373c9, + 0x148109, + 0x167709, + 0x16794a, + 0x171749, + 0x1724c9, + 0x172fcb, + 0x6208, + 0x100c08, + 0x1709, + 0x1495607, + 0xe3dc5, + 0x206643, + 0x21f603, + 0x205503, + 0x206543, + 0x2109c3, + 0x21f143, + 0x1f143, 0x2000c2, - 0x23ac85, - 0x222003, - 0xa4e0b642, - 0x226783, - 0x20f583, - 0x2c1a07, - 0x21e803, - 0x214e43, - 0x214883, - 0x262243, - 0x21c243, - 0x20f0c3, - 0x208c43, - 0x24acc3, - 0x3ae506, - 0x249902, + 0x3be445, + 0x206b03, + 0xa6a02202, + 0x21f603, + 0x205503, + 0x3907c7, + 0x2721c3, + 0x206543, + 0x2109c3, + 0x22b643, + 0x20f4c3, + 0x2034c3, + 0x21d783, + 0x21f143, + 0x3a6386, + 0x23d942, 0x200f83, - 0x1c3448, + 0x1b9688, 0x2000c2, - 0x20bec3, - 0x20b642, - 0x21a043, - 0x226783, - 0x20f583, - 0x25c204, - 0x214e43, - 0x214883, - 0x24acc3, - 0x219303, - 0x152347, - 0x13c02, - 0x148744, - 0x153a806, + 0x250b03, + 0x202202, + 0x206643, + 0x21f603, + 0x205503, + 0x2503c4, + 0x206543, + 0x2109c3, + 0x21f143, + 0x213dc3, + 0x7247, + 0xd842, + 0x13ad44, + 0x1544746, 0x2000c2, - 0x20b642, - 0x20f583, - 0x214e43, - 0x24acc3, + 0x202202, + 0x205503, + 0x206543, + 0x21f143, } // children is the list of nodes' children, the parent's wildcard bit and the @@ -9884,6 +9899,7 @@ var nodes = [...]uint32{ // will be in the range [0, 6), depending on the wildcard bit and node type. 
// // The layout within the uint32, from MSB to LSB, is: +// // [ 1 bits] unused // [ 1 bits] wildcard bit // [ 2 bits] node type @@ -9896,667 +9912,674 @@ var children = [...]uint32{ 0x40000000, 0x50000000, 0x60000000, - 0x17ac5e4, - 0x17b05eb, - 0x17b45ec, - 0x17d45ed, - 0x192c5f5, - 0x194064b, - 0x1954650, - 0x1968655, - 0x198465a, - 0x19a8661, - 0x19c066a, - 0x1a0c670, - 0x1a10683, - 0x1a38684, - 0x1a3c68e, - 0x1a5468f, - 0x1a58695, + 0x179c5e0, + 0x17a05e7, + 0x17a45e8, + 0x17c45e9, + 0x191c5f1, + 0x1930647, + 0x194464c, + 0x1958651, + 0x1974656, + 0x199865d, + 0x19b0666, + 0x1a0066c, + 0x1a04680, + 0x1a3c681, + 0x1a4068f, + 0x1a58690, 0x1a5c696, - 0x1aa0697, - 0x1aa46a8, + 0x1a60697, + 0x1aa4698, 0x1aa86a9, - 0x21aac6aa, - 0x61ab46ab, - 0x21abc6ad, - 0x1b046af, - 0x1b106c1, - 0x21b146c4, - 0x1b386c5, - 0x1b3c6ce, - 0x1b506cf, - 0x1b546d4, - 0x1b746d5, - 0x1ba46dd, - 0x1bc46e9, - 0x1bcc6f1, - 0x1bf46f3, - 0x1c106fd, - 0x21c14704, + 0x1aac6aa, + 0x21ab06ab, + 0x61ab86ac, + 0x21ac06ae, + 0x1b086b0, + 0x1b146c2, + 0x21b186c5, + 0x1b3c6c6, + 0x1b406cf, + 0x1b546d0, + 0x1b586d5, + 0x1b786d6, + 0x1ba86de, + 0x1bc86ea, + 0x1bd06f2, + 0x1bf86f4, + 0x1c146fe, 0x21c18705, - 0x1c1c706, - 0x1cb4707, - 0x1cc872d, - 0x1cdc732, - 0x1d14737, - 0x1d24745, - 0x1d38749, - 0x1d5074e, - 0x1df4754, - 0x202877d, - 0x203080a, - 0x2203480c, + 0x21c1c706, + 0x1c20707, + 0x1cb8708, + 0x1ccc72e, + 0x1ce0733, + 0x1d18738, + 0x1d28746, + 0x1d3c74a, + 0x1d5474f, + 0x1df8755, + 0x202c77e, + 0x203480b, 0x2203880d, - 0x20a480e, - 0x2110829, - 0x2128844, - 0x213c84a, - 0x214084f, + 0x2203c80e, + 0x20a880f, + 0x211482a, + 0x212c845, + 0x214084b, 0x2144850, - 0x214c851, - 0x2164853, - 0x2168859, - 0x218485a, - 0x21d8861, - 0x21dc876, - 0x221e0877, - 0x2200878, - 0x2204880, - 0x2208881, - 0x2238882, - 0x6223c88e, - 0x2224488f, - 0x22248891, - 0x228c892, - 0x622908a3, - 0x22a48a4, - 0x23048a9, - 0x223088c1, - 0x2230c8c2, - 0x223148c3, + 0x2148851, + 0x2150852, + 0x2168854, + 0x216c85a, + 0x218885b, + 0x21dc862, + 0x21e0877, + 0x221e4878, + 0x2208879, + 0x2220c882, + 0x2210883, + 0x2214884, + 0x2244885, + 0x62248891, + 0x22250892, + 0x22254894, + 0x2298895, + 0x6229c8a6, + 0x22b08a7, + 0x23108ac, + 0x223148c4, 0x223188c5, - 0x2231c8c6, - 0x23208c7, - 0x23288c8, + 0x223208c6, + 0x223248c8, + 0x223288c9, 0x232c8ca, - 0x223388cb, - 0x223408ce, - 0x23508d0, - 0x23608d4, - 0x24148d8, - 0x2418905, - 0x22428906, - 0x2242c90a, - 0x2243490b, - 0x249090d, - 0x2494924, - 0x2498925, - 0x249c926, - 0x2a90927, - 0x2a94aa4, - 0x22b3caa5, - 0x22b40acf, - 0x22b44ad0, - 0x22b50ad1, - 0x22b54ad4, - 0x22b60ad5, - 0x22b64ad8, - 0x22b68ad9, - 0x22b6cada, + 0x23348cb, + 0x23388cd, + 0x223448ce, + 0x2234c8d1, + 0x235c8d3, + 0x236c8d7, + 0x24208db, + 0x2424908, + 0x22434909, + 0x2243890d, + 0x2244090e, + 0x249c910, + 0x24a0927, + 0x24a4928, + 0x24a8929, + 0x2a9c92a, + 0x2aa0aa7, + 0x22b48aa8, + 0x22b4cad2, + 0x22b50ad3, + 0x22b5cad4, + 0x22b60ad7, + 0x22b6cad8, 0x22b70adb, 0x22b74adc, - 0x22b80add, - 0x22b84ae0, - 0x22b90ae1, - 0x22b94ae4, - 0x22b98ae5, - 0x22b9cae6, - 0x22ba8ae7, - 0x22bacaea, - 0x22bb8aeb, - 0x22bbcaee, - 0x22bc0aef, - 0x22bc4af0, - 0x2bc8af1, + 0x22b78add, + 0x22b7cade, + 0x22b80adf, + 0x22b8cae0, + 0x22b90ae3, + 0x22b9cae4, + 0x22ba0ae7, + 0x22ba4ae8, + 0x22ba8ae9, + 0x22bb4aea, + 0x22bb8aed, + 0x22bc4aee, + 0x22bc8af1, 0x22bccaf2, - 0x22bd8af3, - 0x22bdcaf6, - 0x2be0af7, - 0x2be8af8, - 0x62bf4afa, - 0x22bfcafd, - 0x2c40aff, - 0x22c60b10, - 0x22c64b18, - 0x22c68b19, - 0x22c70b1a, - 0x22c78b1c, - 0x22c7cb1e, - 0x22c80b1f, - 
0x22c88b20, + 0x22bd0af3, + 0x2bd4af4, + 0x22bd8af5, + 0x22be4af6, + 0x22be8af9, + 0x2becafa, + 0x2bf4afb, + 0x62c00afd, + 0x22c08b00, + 0x2c4cb02, + 0x22c6cb13, + 0x22c70b1b, + 0x22c74b1c, + 0x22c7cb1d, + 0x22c84b1f, + 0x22c88b21, 0x22c8cb22, - 0x22c90b23, - 0x2c94b24, - 0x22cc0b25, - 0x22cc4b30, - 0x22cc8b31, - 0x2cccb32, - 0x22cd0b33, + 0x22c94b23, + 0x22c98b25, + 0x22c9cb26, + 0x22ca0b27, + 0x2ca4b28, + 0x22cd0b29, 0x22cd4b34, - 0x22ce0b35, + 0x22cd8b35, + 0x2cdcb36, + 0x22ce0b37, 0x22ce4b38, - 0x2ce8b39, - 0x2cf0b3a, - 0x2cfcb3c, - 0x2d04b3f, - 0x2d20b41, - 0x2d38b48, - 0x2d3cb4e, - 0x2d4cb4f, - 0x2d58b53, - 0x2d8cb56, - 0x2d94b63, - 0x22d98b65, - 0x2db0b66, - 0x22db8b6c, + 0x22cf0b39, + 0x22cf4b3c, + 0x2cf8b3d, + 0x2d00b3e, + 0x2d0cb40, + 0x2d14b43, + 0x2d30b45, + 0x2d48b4c, + 0x2d60b52, + 0x2d70b58, + 0x2d7cb5c, + 0x2db0b5f, + 0x2db8b6c, 0x22dbcb6e, - 0x22dc4b6f, - 0x2ed0b71, - 0x22ed4bb4, - 0x2edcbb5, - 0x2ee0bb7, - 0x22ee4bb8, - 0x2ee8bb9, - 0x2f34bba, - 0x2f38bcd, - 0x2f3cbce, - 0x2f58bcf, - 0x2f6cbd6, - 0x2f94bdb, - 0x2fbcbe5, - 0x2fc0bef, - 0x62fc4bf0, - 0x2ff4bf1, - 0x2ff8bfd, - 0x22ffcbfe, - 0x3000bff, - 0x3028c00, - 0x302cc0a, - 0x3050c0b, - 0x3054c14, - 0x306cc15, - 0x3070c1b, - 0x3074c1c, - 0x3094c1d, - 0x30b4c25, - 0x230b8c2d, - 0x30bcc2e, - 0x230c0c2f, - 0x30c4c30, - 0x30c8c31, - 0x30ccc32, - 0x30d0c33, - 0x30f0c34, - 0x230f4c3c, - 0x230fcc3d, + 0x2dd4b6f, + 0x22ddcb75, + 0x22de0b77, + 0x22de8b78, + 0x2ef8b7a, + 0x22efcbbe, + 0x2f04bbf, + 0x2f08bc1, + 0x22f0cbc2, + 0x22f10bc3, + 0x22f14bc4, + 0x2f18bc5, + 0x2f64bc6, + 0x2f68bd9, + 0x2f6cbda, + 0x2f88bdb, + 0x2f9cbe2, + 0x2fc4be7, + 0x2fecbf1, + 0x2ff0bfb, + 0x62ff4bfc, + 0x3024bfd, + 0x3028c09, + 0x2302cc0a, + 0x3030c0b, + 0x3058c0c, + 0x305cc16, + 0x3080c17, + 0x3084c20, + 0x309cc21, + 0x30a0c27, + 0x30a4c28, + 0x30c4c29, + 0x30e4c31, + 0x230e8c39, + 0x30ecc3a, + 0x230f0c3b, + 0x30f4c3c, + 0x30f8c3d, + 0x30fcc3e, 0x3100c3f, - 0x3128c40, - 0x313cc4a, - 0x31bcc4f, - 0x31c4c6f, - 0x31c8c71, - 0x31e4c72, - 0x31fcc79, - 0x3200c7f, - 0x3214c80, + 0x3120c40, + 0x23124c48, + 0x2312cc49, + 0x3130c4b, + 0x3158c4c, + 0x316cc56, + 0x31ecc5b, + 0x31f4c7b, + 0x31f8c7d, + 0x3214c7e, 0x322cc85, - 0x3248c8b, - 0x3260c92, - 0x3268c98, - 0x3284c9a, - 0x329cca1, - 0x32a0ca7, - 0x32c8ca8, - 0x32e8cb2, - 0x3304cba, - 0x3308cc1, - 0x336ccc2, - 0x3388cdb, - 0x33b0ce2, - 0x33b4cec, - 0x33ccced, - 0x3410cf3, - 0x3490d04, - 0x34d0d24, - 0x34d4d34, - 0x34e0d35, - 0x3500d38, - 0x3508d40, - 0x3528d42, - 0x3530d4a, - 0x3574d4c, - 0x35c8d5d, - 0x35ccd72, - 0x36dcd73, - 0x236e4db7, - 0x236e8db9, - 0x236ecdba, - 0x236f0dbb, - 0x236f4dbc, - 0x36f8dbd, - 0x236fcdbe, - 0x23700dbf, - 0x23704dc0, - 0x3708dc1, - 0x370cdc2, - 0x23710dc3, - 0x23720dc4, - 0x23728dc8, - 0x23730dca, + 0x3230c8b, + 0x3244c8c, + 0x325cc91, + 0x3278c97, + 0x3290c9e, + 0x329cca4, + 0x32b8ca7, + 0x32d0cae, + 0x32d4cb4, + 0x32fccb5, + 0x331ccbf, + 0x3338cc7, + 0x333ccce, + 0x33a0ccf, + 0x33bcce8, + 0x33e4cef, + 0x33e8cf9, + 0x3400cfa, + 0x3444d00, + 0x34c4d11, + 0x3504d31, + 0x3508d41, + 0x350cd42, + 0x3518d43, + 0x3538d46, + 0x3544d4e, + 0x3564d51, + 0x356cd59, + 0x35b0d5b, + 0x3604d6c, + 0x3608d81, + 0x371cd82, + 0x23724dc7, + 0x23728dc9, + 0x2372cdca, + 0x23730dcb, 0x23734dcc, - 0x2373cdcd, + 0x23738dcd, + 0x2373cdce, 0x23740dcf, - 0x23744dd0, - 0x375cdd1, - 0x3780dd7, - 0x37a0de0, - 0x3e18de8, - 0x23e1cf86, - 0x23e20f87, - 0x23e24f88, - 0x23e28f89, - 0x3e38f8a, - 0x3e58f8e, - 0x4018f96, - 0x40e9006, - 0x415903a, - 0x41b1056, - 0x429906c, - 0x42f10a6, - 0x432d0bc, - 0x44290cb, - 
0x44f510a, - 0x458d13d, - 0x461d163, - 0x4681187, - 0x48b91a0, - 0x497122e, - 0x4a3d25c, - 0x4a8928f, - 0x4b112a2, - 0x4b4d2c4, - 0x4b9d2d3, - 0x4c152e7, - 0x64c19305, - 0x64c1d306, - 0x64c21307, - 0x4c9d308, - 0x4cf9327, - 0x4d7533e, - 0x4ded35d, - 0x4e6d37b, - 0x4ed939b, - 0x50053b6, - 0x505d401, - 0x65061417, - 0x50f9418, - 0x510143e, - 0x25105440, - 0x518d441, - 0x51d9463, - 0x5241476, - 0x52e9490, - 0x53b14ba, - 0x54194ec, - 0x552d506, - 0x6553154b, - 0x6553554c, - 0x559154d, - 0x55ed564, - 0x567d57b, - 0x56f959f, - 0x573d5be, - 0x58215cf, - 0x5855608, - 0x58b5615, - 0x592962d, - 0x59b164a, - 0x59f166c, - 0x5a6167c, - 0x65a65698, - 0x5a89699, - 0x5a8d6a2, - 0x5abd6a3, - 0x5ad96af, - 0x5b1d6b6, - 0x5b2d6c7, - 0x5b456cb, - 0x5bbd6d1, - 0x5bc56ef, - 0x5be16f1, - 0x5bf56f8, - 0x5c156fd, - 0x25c19705, - 0x5c41706, - 0x5c45710, - 0x5c4d711, - 0x5c61713, - 0x5c7d718, - 0x5c8571f, - 0x5c91721, - 0x5c95724, - 0x5cd1725, - 0x5cd5734, - 0x5cdd735, - 0x5cf1737, - 0x5d1973c, - 0x5d21746, - 0x5d25748, - 0x5d49749, - 0x5d6d752, - 0x5d8575b, - 0x5d89761, - 0x5d91762, - 0x5d99764, - 0x5dad766, - 0x5e6976b, - 0x5e6d79a, - 0x5e7579b, - 0x5e7979d, - 0x5e9d79e, - 0x5ebd7a7, - 0x5ed97af, - 0x5ee97b6, - 0x5efd7ba, - 0x5f057bf, - 0x5f0d7c1, - 0x5f117c3, - 0x5f197c4, - 0x5f357c6, - 0x5f457cd, + 0x3744dd0, + 0x3748dd1, + 0x2374cdd2, + 0x2375cdd3, + 0x23764dd7, + 0x2376cdd9, + 0x23770ddb, + 0x23778ddc, + 0x2377cdde, + 0x23780ddf, + 0x3798de0, + 0x37bcde6, + 0x37dcdef, + 0x3e54df7, + 0x23e58f95, + 0x23e5cf96, + 0x23e60f97, + 0x23e64f98, + 0x3e74f99, + 0x3e94f9d, + 0x4054fa5, + 0x4125015, + 0x4195049, + 0x41ed065, + 0x42d507b, + 0x432d0b5, + 0x43690cb, + 0x44650da, + 0x4531119, + 0x45c914c, + 0x4659172, + 0x46bd196, + 0x48f51af, + 0x49ad23d, + 0x4a7926b, + 0x4ac529e, + 0x4b4d2b1, + 0x4b892d3, + 0x4bd92e2, + 0x4c512f6, + 0x64c55314, + 0x64c59315, + 0x64c5d316, + 0x4cd9317, + 0x4d35336, + 0x4db134d, + 0x4e2936c, + 0x4ea938a, + 0x4f153aa, + 0x50413c5, + 0x5099410, + 0x6509d426, + 0x5135427, + 0x513d44d, + 0x2514144f, + 0x51c9450, + 0x5215472, + 0x527d485, + 0x532549f, + 0x53ed4c9, + 0x54554fb, + 0x5569515, + 0x6556d55a, + 0x6557155b, + 0x55cd55c, + 0x5629573, + 0x56b958a, + 0x57355ae, + 0x57795cd, + 0x585d5de, + 0x5891617, + 0x58f1624, + 0x596563c, + 0x59ed659, + 0x5a2d67b, + 0x5a9d68b, + 0x65aa16a7, + 0x5ac56a8, + 0x5ac96b1, + 0x5af96b2, + 0x5b156be, + 0x5b596c5, + 0x5b696d6, + 0x5b816da, + 0x5bf96e0, + 0x5c016fe, + 0x5c1d700, + 0x5c31707, + 0x5c5170c, + 0x25c55714, + 0x5c7d715, + 0x5c8171f, + 0x5c89720, + 0x5c9d722, + 0x5cb9727, + 0x5cc172e, + 0x5ccd730, + 0x5cd1733, + 0x5d0d734, + 0x5d11743, + 0x5d19744, + 0x5d2d746, + 0x5d5574b, + 0x5d5d755, + 0x5d61757, + 0x5d85758, + 0x5da9761, + 0x5dc176a, + 0x5dc5770, + 0x5dcd771, + 0x5dd5773, + 0x5de9775, + 0x5ea177a, + 0x5ea57a8, + 0x5ead7a9, + 0x5eb17ab, + 0x5ed57ac, + 0x5ef57b5, + 0x5f117bd, + 0x5f217c4, + 0x5f357c8, + 0x5f3d7cd, + 0x5f457cf, 0x5f497d1, - 0x5f657d2, - 0x67ed7d9, - 0x68259fb, - 0x6851a09, - 0x6869a14, - 0x688da1a, - 0x68ada23, - 0x68f1a2b, - 0x68f9a3c, - 0x268fda3e, - 0x26901a3f, - 0x6909a40, - 0x6b4da42, - 0x26b51ad3, - 0x26b55ad4, - 0x6b69ad5, - 0x26b6dada, - 0x6b71adb, - 0x6b79adc, - 0x26b85ade, - 0x26b95ae1, - 0x26b9dae5, - 0x26ba9ae7, + 0x5f517d2, + 0x5f6d7d4, + 0x5f7d7db, + 0x5f817df, + 0x5f9d7e0, + 0x68257e7, + 0x685da09, + 0x6889a17, + 0x68a1a22, + 0x68c5a28, + 0x68e5a31, + 0x6929a39, + 0x6931a4a, + 0x26935a4c, + 0x26939a4d, + 0x6941a4e, + 0x6b89a50, + 0x26b8dae2, + 0x26b91ae3, + 0x6ba5ae4, + 0x26ba9ae9, 0x6badaea, - 0x26bb1aeb, - 0x26bc9aec, - 
0x26bd1af2, + 0x6bb5aeb, + 0x26bc1aed, + 0x26bd1af0, 0x26bd9af4, - 0x26bddaf6, - 0x26be5af7, + 0x26be5af6, 0x6be9af9, 0x26bedafa, - 0x6bf1afb, - 0x26bfdafc, - 0x6c05aff, - 0x6c19b01, - 0x6c1db06, - 0x6c45b07, - 0x6c81b11, - 0x6c85b20, - 0x6cbdb21, - 0x6ce5b2f, - 0x7841b39, - 0x7845e10, - 0x7849e11, - 0x2784de12, - 0x7851e13, - 0x27855e14, - 0x7859e15, - 0x27865e16, - 0x7869e19, - 0x786de1a, - 0x27871e1b, - 0x7875e1c, - 0x2787de1d, + 0x26c05afb, + 0x26c0db01, + 0x26c15b03, + 0x26c19b05, + 0x26c21b06, + 0x26c25b08, + 0x6c29b09, + 0x26c2db0a, + 0x6c31b0b, + 0x26c3db0c, + 0x6c45b0f, + 0x6c59b11, + 0x6c5db16, + 0x6c85b17, + 0x6cc1b21, + 0x6cc5b30, + 0x6cfdb31, + 0x6d1db3f, + 0x7879b47, + 0x787de1e, 0x7881e1f, - 0x7885e20, - 0x27895e21, - 0x7899e25, - 0x789de26, + 0x27885e20, + 0x7889e21, + 0x2788de22, + 0x7891e23, + 0x2789de24, 0x78a1e27, 0x78a5e28, 0x278a9e29, 0x78ade2a, - 0x78b1e2b, - 0x78b5e2c, + 0x278b5e2b, 0x78b9e2d, - 0x278c1e2e, - 0x78c5e30, - 0x78c9e31, - 0x78cde32, - 0x278d1e33, + 0x78bde2e, + 0x278cde2f, + 0x78d1e33, 0x78d5e34, - 0x278dde35, + 0x78d9e35, + 0x78dde36, 0x278e1e37, - 0x78fde38, - 0x790de3f, - 0x794de43, - 0x7951e53, - 0x7975e54, - 0x7989e5d, - 0x798de62, - 0x7999e63, - 0x7b61e66, - 0x27b65ed8, - 0x27b6ded9, - 0x27b71edb, - 0x27b75edc, - 0x7b7dedd, - 0x7c59edf, - 0x27c65f16, - 0x27c69f19, - 0x27c6df1a, - 0x27c71f1b, - 0x7c75f1c, - 0x7ca1f1d, - 0x7cb9f28, - 0x7cbdf2e, - 0x7cddf2f, - 0x7ce9f37, - 0x7d09f3a, - 0x7d0df42, - 0x7d45f43, - 0x800df51, - 0x80ca003, - 0x80ce032, - 0x80d2033, - 0x80e6034, - 0x80ea039, - 0x811e03a, - 0x8156047, - 0x2815a055, - 0x8176056, - 0x819a05d, - 0x819e066, - 0x81be067, - 0x81da06f, - 0x81fe076, - 0x820e07f, - 0x8212083, - 0x8216084, - 0x824e085, - 0x825a093, - 0x8282096, - 0x282860a0, - 0x83220a1, - 0x283260c8, - 0x832a0c9, - 0x833a0ca, - 0x2833e0ce, - 0x83520cf, - 0x836e0d4, - 0x838e0db, - 0x83920e3, - 0x83a60e4, - 0x83ba0e9, - 0x83be0ee, - 0x83c60ef, - 0x83ca0f1, - 0x83ea0f2, - 0x84a20fa, - 0x284a6128, - 0x84aa129, - 0x84ca12a, - 0x84f6132, - 0x2850613d, - 0x850a141, - 0x8516142, - 0x855a145, - 0x855e156, - 0x8572157, - 0x859215c, - 0x85ae164, - 0x85ba16b, - 0x85da16e, - 0x860a176, - 0x8616182, - 0x86e6185, - 0x86ea1b9, - 0x86fe1ba, - 0x87021bf, - 0x871a1c0, - 0x871e1c6, - 0x872a1c7, - 0x87361ca, - 0x873a1cd, - 0x87421ce, + 0x78e5e38, + 0x78e9e39, + 0x78ede3a, + 0x78f1e3b, + 0x278f9e3c, + 0x78fde3e, + 0x7901e3f, + 0x7905e40, + 0x27909e41, + 0x790de42, + 0x27915e43, + 0x27919e45, + 0x7935e46, + 0x7945e4d, + 0x7985e51, + 0x7989e61, + 0x79ade62, + 0x79c1e6b, + 0x79c5e70, + 0x79d1e71, + 0x7b99e74, + 0x27b9dee6, + 0x27ba5ee7, + 0x27ba9ee9, + 0x27badeea, + 0x7bb5eeb, + 0x7c91eed, + 0x27c9df24, + 0x27ca1f27, + 0x27ca5f28, + 0x27ca9f29, + 0x7cadf2a, + 0x7cd9f2b, + 0x7cf1f36, + 0x7cf5f3c, + 0x7d15f3d, + 0x7d21f45, + 0x7d41f48, + 0x7d45f50, + 0x7d7df51, + 0x8045f5f, + 0x8102011, + 0x8106040, + 0x810a041, + 0x811e042, + 0x8122047, + 0x8156048, + 0x818e055, + 0x28192063, + 0x81ae064, + 0x81d206b, + 0x81d6074, + 0x81f6075, + 0x821207d, + 0x8236084, + 0x824608d, + 0x824a091, + 0x824e092, + 0x828a093, + 0x82960a2, + 0x82be0a5, + 0x282c20af, + 0x835e0b0, + 0x283620d7, + 0x83660d8, + 0x83760d9, + 0x2837a0dd, + 0x83920de, + 0x83ae0e4, + 0x83ce0eb, + 0x83d20f3, + 0x83e60f4, + 0x83fa0f9, + 0x83fe0fe, + 0x84060ff, + 0x840a101, + 0x842a102, + 0x84e210a, + 0x284e6138, + 0x84ea139, + 0x850a13a, + 0x8536142, + 0x2854614d, + 0x854a151, + 0x8556152, + 0x859a155, + 0x859e166, + 0x85b2167, + 0x85d216c, + 0x85ee174, + 0x85f217b, + 0x85fe17c, + 0x861e17f, + 0x864e187, + 
0x865a193, + 0x872a196, + 0x872e1ca, + 0x87421cb, 0x87461d0, - 0x876a1d1, - 0x87a61da, - 0x87aa1e9, - 0x87ca1ea, - 0x881e1f2, - 0x884e207, - 0x28852213, - 0x885a214, - 0x88b2216, - 0x88b622c, - 0x88ba22d, - 0x88be22e, - 0x890222f, - 0x8912240, - 0x894e244, - 0x8952253, - 0x8982254, - 0x8ace260, - 0x8af22b3, - 0x8b322bc, - 0x8b622cc, - 0x28b6a2d8, - 0x28b6e2da, - 0x28b722db, - 0x8b7a2dc, - 0x8b862de, - 0x8caa2e1, - 0x8cb632a, - 0x8cc232d, - 0x8cce330, - 0x8cda333, - 0x8ce6336, - 0x8cf2339, - 0x8cfe33c, - 0x8d0a33f, - 0x8d16342, - 0x8d22345, - 0x28d26348, - 0x8d32349, - 0x8d3e34c, - 0x8d4a34f, - 0x8d52352, - 0x8d5e354, + 0x875e1d1, + 0x87621d7, + 0x876e1d8, + 0x877a1db, + 0x877e1de, + 0x87861df, + 0x878a1e1, + 0x87ae1e2, + 0x87ea1eb, + 0x87ee1fa, + 0x880e1fb, + 0x8846203, + 0x8876211, + 0x2887a21d, + 0x887e21e, + 0x888621f, + 0x88de221, + 0x88e2237, + 0x88e6238, + 0x88ea239, + 0x892e23a, + 0x893e24b, + 0x897a24f, + 0x897e25e, + 0x89ae25f, + 0x8afa26b, + 0x8b1e2be, + 0x8b5e2c7, + 0x8b8e2d7, + 0x28b962e3, + 0x28b9a2e5, + 0x28b9e2e6, + 0x8ba62e7, + 0x8bbe2e9, + 0x8ce22ef, + 0x8cee338, + 0x8cfa33b, + 0x8d0633e, + 0x8d12341, + 0x8d1e344, + 0x8d2a347, + 0x8d3634a, + 0x8d4234d, + 0x8d4e350, + 0x8d5a353, + 0x28d5e356, 0x8d6a357, 0x8d7635a, 0x8d8235d, - 0x8d8e360, - 0x8d9a363, - 0x8da6366, - 0x8db2369, - 0x8dbe36c, - 0x8dca36f, - 0x8dd6372, - 0x8e02375, + 0x8d8a360, + 0x8d96362, + 0x8da2365, + 0x8dae368, + 0x8dba36b, + 0x8dc636e, + 0x8dd2371, + 0x8dde374, + 0x8dea377, + 0x8df637a, + 0x8e0237d, 0x8e0e380, - 0x8e1a383, - 0x8e26386, - 0x8e32389, - 0x8e3e38c, - 0x8e4638f, + 0x8e3a383, + 0x8e4638e, 0x8e52391, 0x8e5e394, 0x8e6a397, 0x8e7639a, - 0x8e8239d, - 0x8e8e3a0, - 0x8e9a3a3, - 0x8ea63a6, - 0x8eb23a9, - 0x8ebe3ac, - 0x8eca3af, - 0x8ed23b2, + 0x8e7e39d, + 0x8e8a39f, + 0x8e963a2, + 0x8ea23a5, + 0x8eae3a8, + 0x8eba3ab, + 0x8ec63ae, + 0x8ed23b1, 0x8ede3b4, - 0x8ee63b7, - 0x8ef23b9, - 0x8efe3bc, - 0x8f0a3bf, + 0x8eea3b7, + 0x8ef63ba, + 0x8f023bd, + 0x8f0a3c0, 0x8f163c2, - 0x8f223c5, - 0x8f2e3c8, - 0x8f3a3cb, - 0x8f463ce, - 0x8f4a3d1, - 0x8f563d2, - 0x8f6e3d5, - 0x8f723db, - 0x8f823dc, - 0x8fa23e0, - 0x8fa63e8, - 0x8ff63e9, - 0x8ffa3fd, - 0x900e3fe, - 0x9042403, - 0x9062410, - 0x9066418, - 0x906e419, - 0x909241b, - 0x90aa424, - 0x90c242a, - 0x90da430, - 0x9102436, - 0x9116440, - 0x912e445, - 0x913244b, - 0x2917a44c, - 0x917e45e, - 0x91aa45f, - 0x91ba46a, - 0x91ce46e, + 0x8f1e3c5, + 0x8f2a3c7, + 0x8f363ca, + 0x8f423cd, + 0x8f4e3d0, + 0x8f5a3d3, + 0x8f663d6, + 0x8f723d9, + 0x8f7e3dc, + 0x8f823df, + 0x8f8e3e0, + 0x8fa63e3, + 0x8faa3e9, + 0x8fba3ea, + 0x8fda3ee, + 0x8fde3f6, + 0x902e3f7, + 0x903240b, + 0x904640c, + 0x907a411, + 0x909a41e, + 0x909e426, + 0x90a6427, + 0x90ca429, + 0x90e2432, + 0x90fa438, + 0x911243e, + 0x913a444, + 0x914e44e, + 0x9166453, + 0x916a459, + 0x291b245a, + 0x91b646c, + 0x91e246d, + 0x91f2478, + 0x920647c, } -// max children 662 (capacity 1023) -// max text offset 32015 (capacity 32767) +// max children 669 (capacity 1023) +// max text offset 32017 (capacity 32767) // max text length 36 (capacity 63) -// max hi 9331 (capacity 16383) -// max lo 9326 (capacity 16383) +// max hi 9345 (capacity 16383) +// max lo 9340 (capacity 16383) diff --git a/vendor/golang.org/x/sys/AUTHORS b/vendor/golang.org/x/sys/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/golang.org/x/sys/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. 
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/sys/CONTRIBUTORS b/vendor/golang.org/x/sys/CONTRIBUTORS
deleted file mode 100644
index 1c4577e96..000000000
--- a/vendor/golang.org/x/sys/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go
index dcbb14ef3..271055be0 100644
--- a/vendor/golang.org/x/sys/cpu/byteorder.go
+++ b/vendor/golang.org/x/sys/cpu/byteorder.go
@@ -46,6 +46,7 @@ func hostByteOrder() byteOrder {
 	case "386", "amd64", "amd64p32",
 		"alpha",
 		"arm", "arm64",
+		"loong64",
 		"mipsle", "mips64le", "mips64p32le",
 		"nios2",
 		"ppc64le",
diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go
index b56886f26..83f112c4c 100644
--- a/vendor/golang.org/x/sys/cpu/cpu.go
+++ b/vendor/golang.org/x/sys/cpu/cpu.go
@@ -106,8 +106,8 @@ var ARM64 struct {
 
 // ARM contains the supported CPU features of the current ARM (32-bit) platform.
 // All feature flags are false if:
-// 1. the current platform is not arm, or
-// 2. the current operating system is not Linux.
+//  1. the current platform is not arm, or
+//  2. the current operating system is not Linux.
 var ARM struct {
 	_           CacheLinePad
 	HasSWP      bool // SWP instruction support
diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go
index 87dd5e302..bbaba18bc 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go
@@ -41,13 +41,10 @@ func archInit() {
 	switch runtime.GOOS {
 	case "freebsd":
 		readARM64Registers()
-	case "linux", "netbsd":
+	case "linux", "netbsd", "openbsd":
 		doinit()
 	default:
-		// Most platforms don't seem to allow reading these registers.
-		//
-		// OpenBSD:
-		// See https://golang.org/issue/31746
+		// Many platforms don't seem to allow reading these registers.
 		setMinimalFeatures()
 	}
 }
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
index e363c7d13..a4605e6d1 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
@@ -7,6 +7,7 @@
 
 #include <cpuid.h>
 #include <stdint.h>
+#include <x86intrin.h>
 
 // Need to wrap __get_cpuid_count because it's declared as static.
 int
@@ -17,27 +18,21 @@ gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf,
 	return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx);
 }
 
+#pragma GCC diagnostic ignored "-Wunknown-pragmas"
+#pragma GCC push_options
+#pragma GCC target("xsave")
+#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function)
+
 // xgetbv reads the contents of an XCR (Extended Control Register)
 // specified in the ECX register into registers EDX:EAX.
 // Currently, the only supported value for XCR is 0.
-//
-// TODO: Replace with a better alternative:
-//
-//	#include <xsaveintrin.h>
-//
-//	#pragma GCC target("xsave")
-//
-//	void gccgoXgetbv(uint32_t *eax, uint32_t *edx) {
-//		unsigned long long x = _xgetbv(0);
-//		*eax = x & 0xffffffff;
-//		*edx = (x >> 32) & 0xffffffff;
-//	}
-//
-// Note that _xgetbv is defined starting with GCC 8.
 void gccgoXgetbv(uint32_t *eax, uint32_t *edx) {
-	__asm("  xorl %%ecx, %%ecx\n"
-	      "  xgetbv"
-	    : "=a"(*eax), "=d"(*edx));
+	uint64_t v = _xgetbv(0);
+	*eax = v & 0xffffffff;
+	*edx = v >> 32;
 }
+
+#pragma clang attribute pop
+#pragma GCC pop_options
diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go
new file mode 100644
index 000000000..0f57b05bd
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.go
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build loong64
+// +build loong64
+
+package cpu
+
+const cacheLineSize = 64
+
+func initOptions() {
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go
new file mode 100644
index 000000000..85b64d5cc
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go
@@ -0,0 +1,65 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// Minimal copy of functionality from x/sys/unix so the cpu package can call
+// sysctl without depending on x/sys/unix.
+
+const (
+	// From OpenBSD's sys/sysctl.h.
+	_CTL_MACHDEP = 7
+
+	// From OpenBSD's machine/cpu.h.
+	_CPU_ID_AA64ISAR0 = 2
+	_CPU_ID_AA64ISAR1 = 3
+)
+
+// Implemented in the runtime package (runtime/sys_openbsd3.go)
+func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+//go:linkname syscall_syscall6 syscall.syscall6
+
+func sysctl(mib []uint32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
+	_, _, errno := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+	if errno != 0 {
+		return errno
+	}
+	return nil
+}
+
+var libc_sysctl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_sysctl sysctl "libc.so"
+
+func sysctlUint64(mib []uint32) (uint64, bool) {
+	var out uint64
+	nout := unsafe.Sizeof(out)
+	if err := sysctl(mib, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); err != nil {
+		return 0, false
+	}
+	return out, true
+}
+
+func doinit() {
+	setMinimalFeatures()
+
+	// Get ID_AA64ISAR0 and ID_AA64ISAR1 from sysctl.
+	isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0})
+	if !ok {
+		return
+	}
+	isar1, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR1})
+	if !ok {
+		return
+	}
+	parseARM64SystemRegisters(isar0, isar1, 0)
+
+	Initialized = true
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s
new file mode 100644
index 000000000..054ba05d6
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s
@@ -0,0 +1,11 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_sysctl(SB)
+
+GLOBL	·libc_sysctl_trampoline_addr(SB), RODATA, $8
+DATA	·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
index f8c484f58..f3cde129b 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !linux && !netbsd && arm64
-// +build !linux,!netbsd,arm64
+//go:build !linux && !netbsd && !openbsd && arm64
+// +build !linux,!netbsd,!openbsd,arm64
 
 package cpu
diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go
new file mode 100644
index 000000000..dd10eb79f
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go
@@ -0,0 +1,12 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !linux && riscv64
+// +build !linux,riscv64
+
+package cpu
+
+func archInit() {
+	Initialized = true
+}
diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go
index 78192498d..b981cfbb4 100644
--- a/vendor/golang.org/x/sys/execabs/execabs.go
+++ b/vendor/golang.org/x/sys/execabs/execabs.go
@@ -53,7 +53,7 @@ func relError(file, path string) error {
 // LookPath instead returns an error.
 func LookPath(file string) (string, error) {
 	path, err := exec.LookPath(file)
-	if err != nil {
+	if err != nil && !isGo119ErrDot(err) {
 		return "", err
 	}
 	if filepath.Base(file) == file && !filepath.IsAbs(path) {
diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go
new file mode 100644
index 000000000..6ab5f5089
--- /dev/null
+++ b/vendor/golang.org/x/sys/execabs/execabs_go118.go
@@ -0,0 +1,12 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.19
+// +build !go1.19
+
+package execabs
+
+func isGo119ErrDot(err error) bool {
+	return false
+}
diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go
new file mode 100644
index 000000000..1e7a9ada0
--- /dev/null
+++ b/vendor/golang.org/x/sys/execabs/execabs_go119.go
@@ -0,0 +1,15 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19
+// +build go1.19
+
+package execabs
+
+import "strings"
+
+func isGo119ErrDot(err error) bool {
+	// TODO: return errors.Is(err, exec.ErrDot)
+	return strings.Contains(err.Error(), "current directory")
+}
diff --git a/vendor/golang.org/x/sys/plan9/syscall.go b/vendor/golang.org/x/sys/plan9/syscall.go
index 602473cba..a25223b8f 100644
--- a/vendor/golang.org/x/sys/plan9/syscall.go
+++ b/vendor/golang.org/x/sys/plan9/syscall.go
@@ -113,5 +113,6 @@ func (tv *Timeval) Nano() int64 {
 
 // use is a no-op, but the compiler cannot see that it is.
 // Calling use(p) ensures that p is kept live until that point.
+//
 //go:noescape
 func use(p unsafe.Pointer)
diff --git a/vendor/golang.org/x/sys/plan9/syscall_plan9.go b/vendor/golang.org/x/sys/plan9/syscall_plan9.go
index 723b1f400..d079d8116 100644
--- a/vendor/golang.org/x/sys/plan9/syscall_plan9.go
+++ b/vendor/golang.org/x/sys/plan9/syscall_plan9.go
@@ -115,6 +115,7 @@ func Write(fd int, p []byte) (n int, err error) {
 var ioSync int64
 
 //sys	fd2path(fd int, buf []byte) (err error)
+
 func Fd2path(fd int) (path string, err error) {
 	var buf [512]byte
 
@@ -126,6 +127,7 @@ func Fd2path(fd int) (path string, err error) {
 }
 
 //sys	pipe(p *[2]int32) (err error)
+
 func Pipe(p []int) (err error) {
 	if len(p) != 2 {
 		return syscall.ErrorString("bad arg in system call")
@@ -180,6 +182,7 @@ func (w Waitmsg) ExitStatus() int {
 }
 
 //sys	await(s []byte) (n int, err error)
+
 func Await(w *Waitmsg) (err error) {
 	var buf [512]byte
 	var f [5][]byte
@@ -301,42 +304,49 @@ func Getgroups() (gids []int, err error) {
 }
 
 //sys	open(path string, mode int) (fd int, err error)
+
 func Open(path string, mode int) (fd int, err error) {
 	fixwd()
 	return open(path, mode)
 }
 
 //sys	create(path string, mode int, perm uint32) (fd int, err error)
+
 func Create(path string, mode int, perm uint32) (fd int, err error) {
 	fixwd()
 	return create(path, mode, perm)
 }
 
 //sys	remove(path string) (err error)
+
 func Remove(path string) error {
 	fixwd()
 	return remove(path)
 }
 
 //sys	stat(path string, edir []byte) (n int, err error)
+
 func Stat(path string, edir []byte) (n int, err error) {
 	fixwd()
 	return stat(path, edir)
 }
 
 //sys	bind(name string, old string, flag int) (err error)
+
 func Bind(name string, old string, flag int) (err error) {
 	fixwd()
 	return bind(name, old, flag)
 }
 
 //sys	mount(fd int, afd int, old string, flag int, aname string) (err error)
+
 func Mount(fd int, afd int, old string, flag int, aname string) (err error) {
 	fixwd()
 	return mount(fd, afd, old, flag, aname)
 }
 
 //sys	wstat(path string, edir []byte) (err error)
+
 func Wstat(path string, edir []byte) (err error) {
 	fixwd()
 	return wstat(path, edir)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s
new file mode 100644
index 000000000..d560019ea
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s
@@ -0,0 +1,29 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (darwin || freebsd || netbsd || openbsd) && gc
+// +build darwin freebsd netbsd openbsd
+// +build gc
+
+#include "textflag.h"
+
+// System call support for RISCV64 BSD
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT	·Syscall(SB),NOSPLIT,$0-56
+	JMP	syscall·Syscall(SB)
+
+TEXT	·Syscall6(SB),NOSPLIT,$0-80
+	JMP	syscall·Syscall6(SB)
+
+TEXT	·Syscall9(SB),NOSPLIT,$0-104
+	JMP	syscall·Syscall9(SB)
+
+TEXT	·RawSyscall(SB),NOSPLIT,$0-56
+	JMP	syscall·RawSyscall(SB)
+
+TEXT	·RawSyscall6(SB),NOSPLIT,$0-80
+	JMP	syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s
new file mode 100644
index 000000000..565357288
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s
@@ -0,0 +1,54 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && loong64 && gc
+// +build linux
+// +build loong64
+// +build gc
+
+#include "textflag.h"
+
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+	JMP	syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+	JMP	syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+	JAL	runtime·entersyscall(SB)
+	MOVV	a1+8(FP), R4
+	MOVV	a2+16(FP), R5
+	MOVV	a3+24(FP), R6
+	MOVV	R0, R7
+	MOVV	R0, R8
+	MOVV	R0, R9
+	MOVV	trap+0(FP), R11	// syscall entry
+	SYSCALL
+	MOVV	R4, r1+32(FP)
+	MOVV	R0, r2+40(FP)	// r2 is not used. Always set to 0
+	JAL	runtime·exitsyscall(SB)
+	RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+	JMP	syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+	JMP	syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+	MOVV	a1+8(FP), R4
+	MOVV	a2+16(FP), R5
+	MOVV	a3+24(FP), R6
+	MOVV	R0, R7
+	MOVV	R0, R8
+	MOVV	R0, R9
+	MOVV	trap+0(FP), R11	// syscall entry
+	SYSCALL
+	MOVV	R4, r1+32(FP)
+	MOVV	R0, r2+40(FP)	// r2 is not used. Always set to 0
+	RET
diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go
index 4362f47e2..b0f2bc4ae 100644
--- a/vendor/golang.org/x/sys/unix/endian_little.go
+++ b/vendor/golang.org/x/sys/unix/endian_little.go
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 //
-//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh
-// +build 386 amd64 amd64p32 alpha arm arm64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh
+//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh
+// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh
 
 package unix
diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go b/vendor/golang.org/x/sys/unix/errors_freebsd_386.go
deleted file mode 100644
index 761db66ef..000000000
--- a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep
-// them here for backwards compatibility.
- -package unix - -const ( - DLT_HHDLC = 0x79 - IFF_SMART = 0x20 - IFT_1822 = 0x2 - IFT_A12MPPSWITCH = 0x82 - IFT_AAL2 = 0xbb - IFT_AAL5 = 0x31 - IFT_ADSL = 0x5e - IFT_AFLANE8023 = 0x3b - IFT_AFLANE8025 = 0x3c - IFT_ARAP = 0x58 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ASYNC = 0x54 - IFT_ATM = 0x25 - IFT_ATMDXI = 0x69 - IFT_ATMFUNI = 0x6a - IFT_ATMIMA = 0x6b - IFT_ATMLOGICAL = 0x50 - IFT_ATMRADIO = 0xbd - IFT_ATMSUBINTERFACE = 0x86 - IFT_ATMVCIENDPT = 0xc2 - IFT_ATMVIRTUAL = 0x95 - IFT_BGPPOLICYACCOUNTING = 0xa2 - IFT_BSC = 0x53 - IFT_CCTEMUL = 0x3d - IFT_CEPT = 0x13 - IFT_CES = 0x85 - IFT_CHANNEL = 0x46 - IFT_CNR = 0x55 - IFT_COFFEE = 0x84 - IFT_COMPOSITELINK = 0x9b - IFT_DCN = 0x8d - IFT_DIGITALPOWERLINE = 0x8a - IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba - IFT_DLSW = 0x4a - IFT_DOCSCABLEDOWNSTREAM = 0x80 - IFT_DOCSCABLEMACLAYER = 0x7f - IFT_DOCSCABLEUPSTREAM = 0x81 - IFT_DS0 = 0x51 - IFT_DS0BUNDLE = 0x52 - IFT_DS1FDL = 0xaa - IFT_DS3 = 0x1e - IFT_DTM = 0x8c - IFT_DVBASILN = 0xac - IFT_DVBASIOUT = 0xad - IFT_DVBRCCDOWNSTREAM = 0x93 - IFT_DVBRCCMACLAYER = 0x92 - IFT_DVBRCCUPSTREAM = 0x94 - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_EPLRS = 0x57 - IFT_ESCON = 0x49 - IFT_ETHER = 0x6 - IFT_FAITH = 0xf2 - IFT_FAST = 0x7d - IFT_FASTETHER = 0x3e - IFT_FASTETHERFX = 0x45 - IFT_FDDI = 0xf - IFT_FIBRECHANNEL = 0x38 - IFT_FRAMERELAYINTERCONNECT = 0x3a - IFT_FRAMERELAYMPI = 0x5c - IFT_FRDLCIENDPT = 0xc1 - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_FRF16MFRBUNDLE = 0xa3 - IFT_FRFORWARD = 0x9e - IFT_G703AT2MB = 0x43 - IFT_G703AT64K = 0x42 - IFT_GIF = 0xf0 - IFT_GIGABITETHERNET = 0x75 - IFT_GR303IDT = 0xb2 - IFT_GR303RDT = 0xb1 - IFT_H323GATEKEEPER = 0xa4 - IFT_H323PROXY = 0xa5 - IFT_HDH1822 = 0x3 - IFT_HDLC = 0x76 - IFT_HDSL2 = 0xa8 - IFT_HIPERLAN2 = 0xb7 - IFT_HIPPI = 0x2f - IFT_HIPPIINTERFACE = 0x39 - IFT_HOSTPAD = 0x5a - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IBM370PARCHAN = 0x48 - IFT_IDSL = 0x9a - IFT_IEEE80211 = 0x47 - IFT_IEEE80212 = 0x37 - IFT_IEEE8023ADLAG = 0xa1 - IFT_IFGSN = 0x91 - IFT_IMT = 0xbe - IFT_INTERLEAVE = 0x7c - IFT_IP = 0x7e - IFT_IPFORWARD = 0x8e - IFT_IPOVERATM = 0x72 - IFT_IPOVERCDLC = 0x6d - IFT_IPOVERCLAW = 0x6e - IFT_IPSWITCH = 0x4e - IFT_IPXIP = 0xf9 - IFT_ISDN = 0x3f - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISDNS = 0x4b - IFT_ISDNU = 0x4c - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88025CRFPINT = 0x62 - IFT_ISO88025DTR = 0x56 - IFT_ISO88025FIBER = 0x73 - IFT_ISO88026 = 0xa - IFT_ISUP = 0xb3 - IFT_L3IPXVLAN = 0x89 - IFT_LAPB = 0x10 - IFT_LAPD = 0x4d - IFT_LAPF = 0x77 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MEDIAMAILOVERIP = 0x8b - IFT_MFSIGLINK = 0xa7 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_MPC = 0x71 - IFT_MPLS = 0xa6 - IFT_MPLSTUNNEL = 0x96 - IFT_MSDSL = 0x8f - IFT_MVL = 0xbf - IFT_MYRINET = 0x63 - IFT_NFAS = 0xaf - IFT_NSIP = 0x1b - IFT_OPTICALCHANNEL = 0xc3 - IFT_OPTICALTRANSPORT = 0xc4 - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PFLOG = 0xf6 - IFT_PFSYNC = 0xf7 - IFT_PLC = 0xae - IFT_POS = 0xab - IFT_PPPMULTILINKBUNDLE = 0x6c - IFT_PROPBWAP2MP = 0xb8 - IFT_PROPCNLS = 0x59 - IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 - IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 - IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 - IFT_PROPMUX = 0x36 - IFT_PROPWIRELESSP2P = 0x9d - IFT_PTPSERIAL = 0x16 - IFT_PVC = 0xf1 - IFT_QLLC = 0x44 - IFT_RADIOMAC = 0xbc - IFT_RADSL = 0x5f - IFT_REACHDSL = 0xc0 - IFT_RFC1483 = 0x9f - IFT_RS232 = 0x21 - IFT_RSRB = 0x4f - IFT_SDLC = 0x11 - IFT_SDSL = 0x60 - IFT_SHDSL = 
0xa9 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETOVERHEADCHANNEL = 0xb9 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_SRP = 0x97 - IFT_SS7SIGLINK = 0x9c - IFT_STACKTOSTACK = 0x6f - IFT_STARLAN = 0xb - IFT_STF = 0xd7 - IFT_T1 = 0x12 - IFT_TDLC = 0x74 - IFT_TERMPAD = 0x5b - IFT_TR008 = 0xb0 - IFT_TRANSPHDLC = 0x7b - IFT_TUNNEL = 0x83 - IFT_ULTRA = 0x1d - IFT_USB = 0xa0 - IFT_V11 = 0x40 - IFT_V35 = 0x2d - IFT_V36 = 0x41 - IFT_V37 = 0x78 - IFT_VDSL = 0x61 - IFT_VIRTUALIPADDRESS = 0x70 - IFT_VOICEEM = 0x64 - IFT_VOICEENCAP = 0x67 - IFT_VOICEFXO = 0x65 - IFT_VOICEFXS = 0x66 - IFT_VOICEOVERATM = 0x98 - IFT_VOICEOVERFRAMERELAY = 0x99 - IFT_VOICEOVERIP = 0x68 - IFT_X213 = 0x5d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25HUNTGROUP = 0x7a - IFT_X25MLP = 0x79 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - IPPROTO_MAXID = 0x34 - IPV6_FAITH = 0x1d - IPV6_MIN_MEMBERSHIPS = 0x1f - IP_FAITH = 0x16 - IP_MAX_SOURCE_FILTER = 0x400 - IP_MIN_MEMBERSHIPS = 0x1f - MAP_NORESERVE = 0x40 - MAP_RENAME = 0x20 - NET_RT_MAXID = 0x6 - RTF_PRCLONING = 0x10000 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - RT_CACHING_CONTEXT = 0x1 - RT_NORTREF = 0x2 - SIOCADDRT = 0x8030720a - SIOCALIFADDR = 0x8118691b - SIOCDELRT = 0x8030720b - SIOCDLIFADDR = 0x8118691d - SIOCGLIFADDR = 0xc118691c - SIOCGLIFPHYADDR = 0xc118694b - SIOCSLIFPHYADDR = 0x8118694a -) diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go deleted file mode 100644 index 070f44b65..000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep -// them here for backwards compatibility. 
- -package unix - -const ( - DLT_HHDLC = 0x79 - IFF_SMART = 0x20 - IFT_1822 = 0x2 - IFT_A12MPPSWITCH = 0x82 - IFT_AAL2 = 0xbb - IFT_AAL5 = 0x31 - IFT_ADSL = 0x5e - IFT_AFLANE8023 = 0x3b - IFT_AFLANE8025 = 0x3c - IFT_ARAP = 0x58 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ASYNC = 0x54 - IFT_ATM = 0x25 - IFT_ATMDXI = 0x69 - IFT_ATMFUNI = 0x6a - IFT_ATMIMA = 0x6b - IFT_ATMLOGICAL = 0x50 - IFT_ATMRADIO = 0xbd - IFT_ATMSUBINTERFACE = 0x86 - IFT_ATMVCIENDPT = 0xc2 - IFT_ATMVIRTUAL = 0x95 - IFT_BGPPOLICYACCOUNTING = 0xa2 - IFT_BSC = 0x53 - IFT_CCTEMUL = 0x3d - IFT_CEPT = 0x13 - IFT_CES = 0x85 - IFT_CHANNEL = 0x46 - IFT_CNR = 0x55 - IFT_COFFEE = 0x84 - IFT_COMPOSITELINK = 0x9b - IFT_DCN = 0x8d - IFT_DIGITALPOWERLINE = 0x8a - IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba - IFT_DLSW = 0x4a - IFT_DOCSCABLEDOWNSTREAM = 0x80 - IFT_DOCSCABLEMACLAYER = 0x7f - IFT_DOCSCABLEUPSTREAM = 0x81 - IFT_DS0 = 0x51 - IFT_DS0BUNDLE = 0x52 - IFT_DS1FDL = 0xaa - IFT_DS3 = 0x1e - IFT_DTM = 0x8c - IFT_DVBASILN = 0xac - IFT_DVBASIOUT = 0xad - IFT_DVBRCCDOWNSTREAM = 0x93 - IFT_DVBRCCMACLAYER = 0x92 - IFT_DVBRCCUPSTREAM = 0x94 - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_EPLRS = 0x57 - IFT_ESCON = 0x49 - IFT_ETHER = 0x6 - IFT_FAITH = 0xf2 - IFT_FAST = 0x7d - IFT_FASTETHER = 0x3e - IFT_FASTETHERFX = 0x45 - IFT_FDDI = 0xf - IFT_FIBRECHANNEL = 0x38 - IFT_FRAMERELAYINTERCONNECT = 0x3a - IFT_FRAMERELAYMPI = 0x5c - IFT_FRDLCIENDPT = 0xc1 - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_FRF16MFRBUNDLE = 0xa3 - IFT_FRFORWARD = 0x9e - IFT_G703AT2MB = 0x43 - IFT_G703AT64K = 0x42 - IFT_GIF = 0xf0 - IFT_GIGABITETHERNET = 0x75 - IFT_GR303IDT = 0xb2 - IFT_GR303RDT = 0xb1 - IFT_H323GATEKEEPER = 0xa4 - IFT_H323PROXY = 0xa5 - IFT_HDH1822 = 0x3 - IFT_HDLC = 0x76 - IFT_HDSL2 = 0xa8 - IFT_HIPERLAN2 = 0xb7 - IFT_HIPPI = 0x2f - IFT_HIPPIINTERFACE = 0x39 - IFT_HOSTPAD = 0x5a - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IBM370PARCHAN = 0x48 - IFT_IDSL = 0x9a - IFT_IEEE80211 = 0x47 - IFT_IEEE80212 = 0x37 - IFT_IEEE8023ADLAG = 0xa1 - IFT_IFGSN = 0x91 - IFT_IMT = 0xbe - IFT_INTERLEAVE = 0x7c - IFT_IP = 0x7e - IFT_IPFORWARD = 0x8e - IFT_IPOVERATM = 0x72 - IFT_IPOVERCDLC = 0x6d - IFT_IPOVERCLAW = 0x6e - IFT_IPSWITCH = 0x4e - IFT_IPXIP = 0xf9 - IFT_ISDN = 0x3f - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISDNS = 0x4b - IFT_ISDNU = 0x4c - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88025CRFPINT = 0x62 - IFT_ISO88025DTR = 0x56 - IFT_ISO88025FIBER = 0x73 - IFT_ISO88026 = 0xa - IFT_ISUP = 0xb3 - IFT_L3IPXVLAN = 0x89 - IFT_LAPB = 0x10 - IFT_LAPD = 0x4d - IFT_LAPF = 0x77 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MEDIAMAILOVERIP = 0x8b - IFT_MFSIGLINK = 0xa7 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_MPC = 0x71 - IFT_MPLS = 0xa6 - IFT_MPLSTUNNEL = 0x96 - IFT_MSDSL = 0x8f - IFT_MVL = 0xbf - IFT_MYRINET = 0x63 - IFT_NFAS = 0xaf - IFT_NSIP = 0x1b - IFT_OPTICALCHANNEL = 0xc3 - IFT_OPTICALTRANSPORT = 0xc4 - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PFLOG = 0xf6 - IFT_PFSYNC = 0xf7 - IFT_PLC = 0xae - IFT_POS = 0xab - IFT_PPPMULTILINKBUNDLE = 0x6c - IFT_PROPBWAP2MP = 0xb8 - IFT_PROPCNLS = 0x59 - IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 - IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 - IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 - IFT_PROPMUX = 0x36 - IFT_PROPWIRELESSP2P = 0x9d - IFT_PTPSERIAL = 0x16 - IFT_PVC = 0xf1 - IFT_QLLC = 0x44 - IFT_RADIOMAC = 0xbc - IFT_RADSL = 0x5f - IFT_REACHDSL = 0xc0 - IFT_RFC1483 = 0x9f - IFT_RS232 = 0x21 - IFT_RSRB = 0x4f - IFT_SDLC = 0x11 - IFT_SDSL = 0x60 - IFT_SHDSL = 
0xa9 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETOVERHEADCHANNEL = 0xb9 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_SRP = 0x97 - IFT_SS7SIGLINK = 0x9c - IFT_STACKTOSTACK = 0x6f - IFT_STARLAN = 0xb - IFT_STF = 0xd7 - IFT_T1 = 0x12 - IFT_TDLC = 0x74 - IFT_TERMPAD = 0x5b - IFT_TR008 = 0xb0 - IFT_TRANSPHDLC = 0x7b - IFT_TUNNEL = 0x83 - IFT_ULTRA = 0x1d - IFT_USB = 0xa0 - IFT_V11 = 0x40 - IFT_V35 = 0x2d - IFT_V36 = 0x41 - IFT_V37 = 0x78 - IFT_VDSL = 0x61 - IFT_VIRTUALIPADDRESS = 0x70 - IFT_VOICEEM = 0x64 - IFT_VOICEENCAP = 0x67 - IFT_VOICEFXO = 0x65 - IFT_VOICEFXS = 0x66 - IFT_VOICEOVERATM = 0x98 - IFT_VOICEOVERFRAMERELAY = 0x99 - IFT_VOICEOVERIP = 0x68 - IFT_X213 = 0x5d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25HUNTGROUP = 0x7a - IFT_X25MLP = 0x79 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - IPPROTO_MAXID = 0x34 - IPV6_FAITH = 0x1d - IPV6_MIN_MEMBERSHIPS = 0x1f - IP_FAITH = 0x16 - IP_MAX_SOURCE_FILTER = 0x400 - IP_MIN_MEMBERSHIPS = 0x1f - MAP_NORESERVE = 0x40 - MAP_RENAME = 0x20 - NET_RT_MAXID = 0x6 - RTF_PRCLONING = 0x10000 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - RT_CACHING_CONTEXT = 0x1 - RT_NORTREF = 0x2 - SIOCADDRT = 0x8040720a - SIOCALIFADDR = 0x8118691b - SIOCDELRT = 0x8040720b - SIOCDLIFADDR = 0x8118691d - SIOCGLIFADDR = 0xc118691c - SIOCGLIFPHYADDR = 0xc118694b - SIOCSLIFPHYADDR = 0x8118694a -) diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go deleted file mode 100644 index 856dca325..000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package unix - -const ( - IFT_1822 = 0x2 - IFT_A12MPPSWITCH = 0x82 - IFT_AAL2 = 0xbb - IFT_AAL5 = 0x31 - IFT_ADSL = 0x5e - IFT_AFLANE8023 = 0x3b - IFT_AFLANE8025 = 0x3c - IFT_ARAP = 0x58 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ASYNC = 0x54 - IFT_ATM = 0x25 - IFT_ATMDXI = 0x69 - IFT_ATMFUNI = 0x6a - IFT_ATMIMA = 0x6b - IFT_ATMLOGICAL = 0x50 - IFT_ATMRADIO = 0xbd - IFT_ATMSUBINTERFACE = 0x86 - IFT_ATMVCIENDPT = 0xc2 - IFT_ATMVIRTUAL = 0x95 - IFT_BGPPOLICYACCOUNTING = 0xa2 - IFT_BSC = 0x53 - IFT_CCTEMUL = 0x3d - IFT_CEPT = 0x13 - IFT_CES = 0x85 - IFT_CHANNEL = 0x46 - IFT_CNR = 0x55 - IFT_COFFEE = 0x84 - IFT_COMPOSITELINK = 0x9b - IFT_DCN = 0x8d - IFT_DIGITALPOWERLINE = 0x8a - IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba - IFT_DLSW = 0x4a - IFT_DOCSCABLEDOWNSTREAM = 0x80 - IFT_DOCSCABLEMACLAYER = 0x7f - IFT_DOCSCABLEUPSTREAM = 0x81 - IFT_DS0 = 0x51 - IFT_DS0BUNDLE = 0x52 - IFT_DS1FDL = 0xaa - IFT_DS3 = 0x1e - IFT_DTM = 0x8c - IFT_DVBASILN = 0xac - IFT_DVBASIOUT = 0xad - IFT_DVBRCCDOWNSTREAM = 0x93 - IFT_DVBRCCMACLAYER = 0x92 - IFT_DVBRCCUPSTREAM = 0x94 - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_EPLRS = 0x57 - IFT_ESCON = 0x49 - IFT_ETHER = 0x6 - IFT_FAST = 0x7d - IFT_FASTETHER = 0x3e - IFT_FASTETHERFX = 0x45 - IFT_FDDI = 0xf - IFT_FIBRECHANNEL = 0x38 - IFT_FRAMERELAYINTERCONNECT = 0x3a - IFT_FRAMERELAYMPI = 0x5c - IFT_FRDLCIENDPT = 0xc1 - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_FRF16MFRBUNDLE = 0xa3 - IFT_FRFORWARD = 0x9e - IFT_G703AT2MB = 0x43 - IFT_G703AT64K = 0x42 - IFT_GIF = 0xf0 - IFT_GIGABITETHERNET = 0x75 - IFT_GR303IDT = 0xb2 - IFT_GR303RDT = 0xb1 - IFT_H323GATEKEEPER = 0xa4 - IFT_H323PROXY = 0xa5 - IFT_HDH1822 = 0x3 - IFT_HDLC = 0x76 - IFT_HDSL2 = 0xa8 - IFT_HIPERLAN2 = 0xb7 - IFT_HIPPI = 0x2f - IFT_HIPPIINTERFACE = 0x39 - IFT_HOSTPAD = 0x5a - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IBM370PARCHAN = 0x48 - IFT_IDSL = 0x9a - IFT_IEEE80211 = 0x47 - IFT_IEEE80212 = 0x37 - IFT_IEEE8023ADLAG = 0xa1 - IFT_IFGSN = 0x91 - IFT_IMT = 0xbe - IFT_INTERLEAVE = 0x7c - IFT_IP = 0x7e - IFT_IPFORWARD = 0x8e - IFT_IPOVERATM = 0x72 - IFT_IPOVERCDLC = 0x6d - IFT_IPOVERCLAW = 0x6e - IFT_IPSWITCH = 0x4e - IFT_ISDN = 0x3f - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISDNS = 0x4b - IFT_ISDNU = 0x4c - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88025CRFPINT = 0x62 - IFT_ISO88025DTR = 0x56 - IFT_ISO88025FIBER = 0x73 - IFT_ISO88026 = 0xa - IFT_ISUP = 0xb3 - IFT_L3IPXVLAN = 0x89 - IFT_LAPB = 0x10 - IFT_LAPD = 0x4d - IFT_LAPF = 0x77 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MEDIAMAILOVERIP = 0x8b - IFT_MFSIGLINK = 0xa7 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_MPC = 0x71 - IFT_MPLS = 0xa6 - IFT_MPLSTUNNEL = 0x96 - IFT_MSDSL = 0x8f - IFT_MVL = 0xbf - IFT_MYRINET = 0x63 - IFT_NFAS = 0xaf - IFT_NSIP = 0x1b - IFT_OPTICALCHANNEL = 0xc3 - IFT_OPTICALTRANSPORT = 0xc4 - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PFLOG = 0xf6 - IFT_PFSYNC = 0xf7 - IFT_PLC = 0xae - IFT_POS = 0xab - IFT_PPPMULTILINKBUNDLE = 0x6c - IFT_PROPBWAP2MP = 0xb8 - IFT_PROPCNLS = 0x59 - IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 - IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 - IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 - IFT_PROPMUX = 0x36 - IFT_PROPWIRELESSP2P = 0x9d - IFT_PTPSERIAL = 0x16 - IFT_PVC = 0xf1 - IFT_QLLC = 0x44 - IFT_RADIOMAC = 0xbc - IFT_RADSL = 0x5f - IFT_REACHDSL = 0xc0 - IFT_RFC1483 = 0x9f - IFT_RS232 = 0x21 - IFT_RSRB = 0x4f - IFT_SDLC = 0x11 - IFT_SDSL = 0x60 - IFT_SHDSL = 0xa9 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP 
= 0x34 - IFT_SONET = 0x27 - IFT_SONETOVERHEADCHANNEL = 0xb9 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_SRP = 0x97 - IFT_SS7SIGLINK = 0x9c - IFT_STACKTOSTACK = 0x6f - IFT_STARLAN = 0xb - IFT_STF = 0xd7 - IFT_T1 = 0x12 - IFT_TDLC = 0x74 - IFT_TERMPAD = 0x5b - IFT_TR008 = 0xb0 - IFT_TRANSPHDLC = 0x7b - IFT_TUNNEL = 0x83 - IFT_ULTRA = 0x1d - IFT_USB = 0xa0 - IFT_V11 = 0x40 - IFT_V35 = 0x2d - IFT_V36 = 0x41 - IFT_V37 = 0x78 - IFT_VDSL = 0x61 - IFT_VIRTUALIPADDRESS = 0x70 - IFT_VOICEEM = 0x64 - IFT_VOICEENCAP = 0x67 - IFT_VOICEFXO = 0x65 - IFT_VOICEFXS = 0x66 - IFT_VOICEOVERATM = 0x98 - IFT_VOICEOVERFRAMERELAY = 0x99 - IFT_VOICEOVERIP = 0x68 - IFT_X213 = 0x5d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25HUNTGROUP = 0x7a - IFT_X25MLP = 0x79 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - - // missing constants on FreeBSD-11.1-RELEASE, copied from old values in ztypes_freebsd_arm.go - IFF_SMART = 0x20 - IFT_FAITH = 0xf2 - IFT_IPXIP = 0xf9 - IPPROTO_MAXID = 0x34 - IPV6_FAITH = 0x1d - IP_FAITH = 0x16 - MAP_NORESERVE = 0x40 - MAP_RENAME = 0x20 - NET_RT_MAXID = 0x6 - RTF_PRCLONING = 0x10000 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - SIOCADDRT = 0x8030720a - SIOCALIFADDR = 0x8118691b - SIOCDELRT = 0x8030720b - SIOCDLIFADDR = 0x8118691d - SIOCGLIFADDR = 0xc118691c - SIOCGLIFPHYADDR = 0xc118694b - SIOCSLIFPHYADDR = 0x8118694a -) diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go deleted file mode 100644 index 946dcf3fc..000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep -// them here for backwards compatibility. - -package unix - -const ( - DLT_HHDLC = 0x79 - IPV6_MIN_MEMBERSHIPS = 0x1f - IP_MAX_SOURCE_FILTER = 0x400 - IP_MIN_MEMBERSHIPS = 0x1f - RT_CACHING_CONTEXT = 0x1 - RT_NORTREF = 0x2 -) diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go index 934af313c..15721a510 100644 --- a/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -8,7 +8,6 @@ package unix import ( - "bytes" "unsafe" ) @@ -45,13 +44,7 @@ func NewIfreq(name string) (*Ifreq, error) { // Name returns the interface name associated with the Ifreq. func (ifr *Ifreq) Name() string { - // BytePtrToString requires a NULL terminator or the program may crash. If - // one is not present, just return the empty string. 
- if !bytes.Contains(ifr.raw.Ifrn[:], []byte{0x00}) { - return "" - } - - return BytePtrToString(&ifr.raw.Ifrn[0]) + return ByteSliceToString(ifr.raw.Ifrn[:]) } // According to netdevice(7), only AF_INET addresses are returned for numerous diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index ee7362348..2cd0e9166 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -73,12 +73,12 @@ aix_ppc64) darwin_amd64) mkerrors="$mkerrors -m64" mktypes="GOARCH=$GOARCH go tool cgo -godefs" - mkasm="go run mkasm_darwin.go" + mkasm="go run mkasm.go" ;; darwin_arm64) mkerrors="$mkerrors -m64" mktypes="GOARCH=$GOARCH go tool cgo -godefs" - mkasm="go run mkasm_darwin.go" + mkasm="go run mkasm.go" ;; dragonfly_amd64) mkerrors="$mkerrors -m64" @@ -89,25 +89,30 @@ dragonfly_amd64) freebsd_386) mkerrors="$mkerrors -m32" mksyscall="go run mksyscall.go -l32" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; freebsd_amd64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; freebsd_arm) mkerrors="$mkerrors" mksyscall="go run mksyscall.go -l32 -arm" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. 
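# (Context, not part of the diff: plain C char is unsigned by default on arm,
# so without -fsigned-char the generated wrappers would disagree with the
# signed-char platforms; the mktypes line below is what applies the flag.)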
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; freebsd_arm64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" + mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" + ;; +freebsd_riscv64) + mkerrors="$mkerrors -m64" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; netbsd_386) @@ -227,5 +232,5 @@ esac if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go"; fi - if [ -n "$mkasm" ]; then echo "$mkasm $GOARCH"; fi + if [ -n "$mkasm" ]; then echo "$mkasm $GOOS $GOARCH"; fi ) | $run diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 72f65a9af..2ab44aa65 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -128,6 +128,7 @@ includes_FreeBSD=' #include <sys/mount.h> #include <sys/wait.h> #include <sys/ioctl.h> +#include <sys/ptrace.h> #include <net/bpf.h> #include <net/if.h> #include <net/if_types.h> @@ -202,6 +203,7 @@ struct ltchars { #include <sys/timerfd.h> #include <sys/uio.h> #include <sys/xattr.h> +#include <linux/audit.h> #include <linux/bpf.h> #include <linux/can.h> #include <linux/can/error.h> @@ -215,6 +217,7 @@ struct ltchars { #include <linux/ethtool_netlink.h> #include <linux/falloc.h> #include <linux/fanotify.h> +#include <linux/fib_rules.h> #include <linux/filter.h> #include <linux/fs.h> #include <linux/fscrypt.h> @@ -294,6 +297,10 @@ struct ltchars { #define SOL_NETLINK 270 #endif +#ifndef SOL_SMC +#define SOL_SMC 286 +#endif + #ifdef SOL_BLUETOOTH // SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h // but it is already in bluetooth_linux.go @@ -528,7 +535,7 @@ ccflags="$@" $2 ~ /^(MS|MNT|MOUNT|UMOUNT)_/ || $2 ~ /^NS_GET_/ || $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || - $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT|TFD)_/ || + $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT|PIOD|TFD)_/ || $2 ~ /^KEXEC_/ || $2 ~ /^LINUX_REBOOT_CMD_/ || $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ || @@ -552,6 +559,7 @@ ccflags="$@" $2 ~ /^CLONE_[A-Z_]+/ || $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ && $2 ~ /^(BPF|DLT)_/ || + $2 ~ /^AUDIT_/ || $2 ~ /^(CLOCK|TIMER)_/ || $2 ~ /^CAN_/ || $2 ~ /^CAP_/ || @@ -574,7 +582,6 @@ ccflags="$@" $2 ~ /^SEEK_/ || $2 ~ /^SPLICE_/ || $2 ~ /^SYNC_FILE_RANGE_/ || - $2 !~ /^AUDIT_RECORD_MAGIC/ && $2 !~ /IOC_MAGIC/ && $2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ || $2 ~ /^(VM|VMADDR)_/ || @@ -613,6 +620,7 @@ ccflags="$@" $2 ~ /^OTP/ || $2 ~ /^MEM/ || $2 ~ /^WG/ || + $2 ~ /^FIB_RULE_/ || $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^__WCOREFLAG$/ {next} $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index f2a114fc2..2db1b51e9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -37,6 +37,7 @@ func Creat(path string, mode uint32) (fd int, err error) { } //sys utimes(path string, times *[2]Timeval) (err error) + func Utimes(path string, tv []Timeval) error { if len(tv) != 2 { 
return EINVAL @@ -45,6 +46,7 @@ func Utimes(path string, tv []Timeval) error { } //sys utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) + func UtimesNano(path string, ts []Timespec) error { if len(ts) != 2 { return EINVAL @@ -215,14 +217,63 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { return } -func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { - // Recvmsg not implemented on AIX - return -1, -1, -1, ENOSYS +func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { + var msg Msghdr + msg.Name = (*byte)(unsafe.Pointer(rsa)) + msg.Namelen = uint32(SizeofSockaddrAny) + var dummy byte + if len(oob) > 0 { + // receive at least one normal byte + if emptyIovecs(iov) { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } + if n, err = recvmsg(fd, &msg, flags); n == -1 { + return + } + oobn = int(msg.Controllen) + recvflags = int(msg.Flags) + return } -func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { - // SendmsgN not implemented on AIX - return -1, ENOSYS +func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { + var msg Msghdr + msg.Name = (*byte)(unsafe.Pointer(ptr)) + msg.Namelen = uint32(salen) + var dummy byte + var empty bool + if len(oob) > 0 { + // send at least one normal byte + empty = emptyIovecs(iov) + if empty { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } + if n, err = sendmsg(fd, &msg, flags); err != nil { + return 0, err + } + if len(oob) > 0 && empty { + n = 0 + } + return n, nil } func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { @@ -300,11 +351,13 @@ func direntNamlen(buf []byte) (uint64, bool) { } //sys getdirent(fd int, buf []byte) (n int, err error) + func Getdents(fd int, buf []byte) (n int, err error) { return getdirent(fd, buf) } //sys wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, err error) + func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { var status _C_int var r Pid_t @@ -372,6 +425,7 @@ func (w WaitStatus) TrapCause() int { return -1 } //sys fcntl(fd int, cmd int, arg int) (val int, err error) //sys fsyncRange(fd int, how int, start int64, length int64) (err error) = fsync_range + func Fsync(fd int) error { return fsyncRange(fd, O_SYNC, 0, 0) } @@ -536,6 +590,7 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { //sys Getsystemcfg(label int) (n uint64) //sys umount(target string) (err error) + func Unmount(target string, flags int) (err error) { if flags != 0 { // AIX doesn't have any flags for umount. 
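The AIX recvmsgRaw/sendmsgN stubs above are replaced with real implementations, mirroring the BSD, Linux and Solaris rewrites later in this diff: the helpers now take a []Iovec instead of a single byte slice, and substitute a one-byte dummy iovec when only ancillary data is being sent on a stream socket. A minimal, hypothetical sketch of the public wrappers built on top of them, passing an open file descriptor across a socketpair via SCM_RIGHTS (assuming golang.org/x/sys/unix at this revision; the donor file and all names are illustrative only):

// fdpass.go: standalone sketch, not part of the vendored code.
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// A connected pair of stream sockets to exchange the descriptor over.
	fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	donor, err := os.Open(os.DevNull) // any open file to donate
	if err != nil {
		panic(err)
	}
	defer donor.Close()

	// Encode the fd as SCM_RIGHTS ancillary data. Stream sockets need at
	// least one byte of normal data alongside it, which is exactly the
	// dummy-byte case sendmsgN handles when the iovec list is empty.
	oob := unix.UnixRights(int(donor.Fd()))
	if err := unix.Sendmsg(fds[0], []byte{0}, oob, nil, 0); err != nil {
		panic(err)
	}

	// Receive the byte plus the control message and decode the passed fd.
	buf := make([]byte, 1)
	oobBuf := make([]byte, unix.CmsgSpace(4)) // room for one int32 fd
	_, oobn, _, _, err := unix.Recvmsg(fds[1], buf, oobBuf, 0)
	if err != nil {
		panic(err)
	}
	cmsgs, err := unix.ParseSocketControlMessage(oobBuf[:oobn])
	if err != nil {
		panic(err)
	}
	passed, err := unix.ParseUnixRights(&cmsgs[0])
	if err != nil {
		panic(err)
	}
	fmt.Println("received duplicate fd:", passed[0])
}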
diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 9c87c5f07..eda42671f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -325,27 +325,26 @@ func GetsockoptString(fd, level, opt int) (string, error) { //sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) -func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { +func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(rsa)) msg.Namelen = uint32(SizeofSockaddrAny) - var iov Iovec - if len(p) > 0 { - iov.Base = (*byte)(unsafe.Pointer(&p[0])) - iov.SetLen(len(p)) - } var dummy byte if len(oob) > 0 { // receive at least one normal byte - if len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + if emptyIovecs(iov) { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } msg.Control = (*byte)(unsafe.Pointer(&oob[0])) msg.SetControllen(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = recvmsg(fd, &msg, flags); err != nil { return } @@ -356,31 +355,32 @@ func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) -func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { +func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(ptr)) msg.Namelen = uint32(salen) - var iov Iovec - if len(p) > 0 { - iov.Base = (*byte)(unsafe.Pointer(&p[0])) - iov.SetLen(len(p)) - } var dummy byte + var empty bool if len(oob) > 0 { // send at least one normal byte - if len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + empty = emptyIovecs(iov) + if empty { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } msg.Control = (*byte)(unsafe.Pointer(&oob[0])) msg.SetControllen(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = sendmsg(fd, &msg, flags); err != nil { return 0, err } - if len(oob) > 0 && len(p) == 0 { + if len(oob) > 0 && empty { n = 0 } return n, nil diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 09a25c653..4f87f16ea 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -393,6 +393,13 @@ func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { return x, err } +func GetsockoptTCPConnectionInfo(fd, level, opt int) (*TCPConnectionInfo, error) { + var value TCPConnectionInfo + vallen := _Socklen(SizeofTCPConnectionInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + func SysctlKinfoProc(name string, args ...int) (*KinfoProc, error) { mib, err := sysctlmib(name, args...) 
if err != nil { @@ -504,6 +511,7 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) @@ -572,7 +580,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { // Nfssvc // Getfh // Quotactl -// Mount // Csops // Waitid // Add_profil diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index c61e27498..61c0d0de1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -125,11 +125,13 @@ func Pipe2(p []int, flags int) (err error) { } //sys extpread(fd int, p []byte, flags int, offset int64) (n int, err error) + func pread(fd int, p []byte, offset int64) (n int, err error) { return extpread(fd, p, 0, offset) } //sys extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error) + func pwrite(fd int, p []byte, offset int64) (n int, err error) { return extpwrite(fd, p, 0, offset) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 6f6c510f4..de7c23e06 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -17,25 +17,12 @@ import ( "unsafe" ) -const ( - SYS_FSTAT_FREEBSD12 = 551 // { int fstat(int fd, _Out_ struct stat *sb); } - SYS_FSTATAT_FREEBSD12 = 552 // { int fstatat(int fd, _In_z_ char *path, \ - SYS_GETDIRENTRIES_FREEBSD12 = 554 // { ssize_t getdirentries(int fd, \ - SYS_STATFS_FREEBSD12 = 555 // { int statfs(_In_z_ char *path, \ - SYS_FSTATFS_FREEBSD12 = 556 // { int fstatfs(int fd, \ - SYS_GETFSSTAT_FREEBSD12 = 557 // { int getfsstat( \ - SYS_MKNODAT_FREEBSD12 = 559 // { int mknodat(int fd, _In_z_ char *path, \ -) - // See https://www.freebsd.org/doc/en_US.ISO8859-1/books/porters-handbook/versions.html. 
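// The osreldate gate kept below can be probed directly; a minimal sketch,
// assuming golang.org/x/sys/unix (1200031 is the INO64_FIRST value removed
// in this diff, the first release date with the ino64 ABI):
//
//	ver, _ := unix.SysctlUint32("kern.osreldate")
//	ino64 := ver >= 1200031 // true on FreeBSD 12.0 or newer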
var ( osreldateOnce sync.Once osreldate uint32 ) -// INO64_FIRST from /usr/src/lib/libc/sys/compat-ino64.h -const _ino64First = 1200031 - func supportsABI(ver uint32) bool { osreldateOnce.Do(func() { osreldate, _ = SysctlUint32("kern.osreldate") }) return osreldate >= ver @@ -159,38 +146,18 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { var ( - _p0 unsafe.Pointer - bufsize uintptr - oldBuf []statfs_freebsd11_t - needsConvert bool + _p0 unsafe.Pointer + bufsize uintptr ) - if len(buf) > 0 { - if supportsABI(_ino64First) { - _p0 = unsafe.Pointer(&buf[0]) - bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) - } else { - n := len(buf) - oldBuf = make([]statfs_freebsd11_t, n) - _p0 = unsafe.Pointer(&oldBuf[0]) - bufsize = unsafe.Sizeof(statfs_freebsd11_t{}) * uintptr(n) - needsConvert = true - } + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } - var sysno uintptr = SYS_GETFSSTAT - if supportsABI(_ino64First) { - sysno = SYS_GETFSSTAT_FREEBSD12 - } - r0, _, e1 := Syscall(sysno, uintptr(_p0), bufsize, uintptr(flags)) + r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) n = int(r0) if e1 != 0 { err = e1 } - if e1 == 0 && needsConvert { - for i := range oldBuf { - buf[i].convertFrom(&oldBuf[i]) - } - } return } @@ -245,87 +212,11 @@ func Uname(uname *Utsname) error { } func Stat(path string, st *Stat_t) (err error) { - var oldStat stat_freebsd11_t - if supportsABI(_ino64First) { - return fstatat_freebsd12(AT_FDCWD, path, st, 0) - } - err = stat(path, &oldStat) - if err != nil { - return err - } - - st.convertFrom(&oldStat) - return nil + return Fstatat(AT_FDCWD, path, st, 0) } func Lstat(path string, st *Stat_t) (err error) { - var oldStat stat_freebsd11_t - if supportsABI(_ino64First) { - return fstatat_freebsd12(AT_FDCWD, path, st, AT_SYMLINK_NOFOLLOW) - } - err = lstat(path, &oldStat) - if err != nil { - return err - } - - st.convertFrom(&oldStat) - return nil -} - -func Fstat(fd int, st *Stat_t) (err error) { - var oldStat stat_freebsd11_t - if supportsABI(_ino64First) { - return fstat_freebsd12(fd, st) - } - err = fstat(fd, &oldStat) - if err != nil { - return err - } - - st.convertFrom(&oldStat) - return nil -} - -func Fstatat(fd int, path string, st *Stat_t, flags int) (err error) { - var oldStat stat_freebsd11_t - if supportsABI(_ino64First) { - return fstatat_freebsd12(fd, path, st, flags) - } - err = fstatat(fd, path, &oldStat, flags) - if err != nil { - return err - } - - st.convertFrom(&oldStat) - return nil -} - -func Statfs(path string, st *Statfs_t) (err error) { - var oldStatfs statfs_freebsd11_t - if supportsABI(_ino64First) { - return statfs_freebsd12(path, st) - } - err = statfs(path, &oldStatfs) - if err != nil { - return err - } - - st.convertFrom(&oldStatfs) - return nil -} - -func Fstatfs(fd int, st *Statfs_t) (err error) { - var oldStatfs statfs_freebsd11_t - if supportsABI(_ino64First) { - return fstatfs_freebsd12(fd, st) - } - err = fstatfs(fd, &oldStatfs) - if err != nil { - return err - } - - st.convertFrom(&oldStatfs) - return nil + return Fstatat(AT_FDCWD, path, st, AT_SYMLINK_NOFOLLOW) } func Getdents(fd int, buf []byte) (n int, err error) { @@ -333,162 +224,25 @@ func Getdents(fd int, buf []byte) (n int, err error) { } func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - if supportsABI(_ino64First) { - if basep == nil || unsafe.Sizeof(*basep) == 8 { - return getdirentries_freebsd12(fd, buf, 
(*uint64)(unsafe.Pointer(basep))) - } - // The freebsd12 syscall needs a 64-bit base. On 32-bit machines - // we can't just use the basep passed in. See #32498. - var base uint64 = uint64(*basep) - n, err = getdirentries_freebsd12(fd, buf, &base) - *basep = uintptr(base) - if base>>32 != 0 { - // We can't stuff the base back into a uintptr, so any - // future calls would be suspect. Generate an error. - // EIO is allowed by getdirentries. - err = EIO - } - return + if basep == nil || unsafe.Sizeof(*basep) == 8 { + return getdirentries(fd, buf, (*uint64)(unsafe.Pointer(basep))) } - - // The old syscall entries are smaller than the new. Use 1/4 of the original - // buffer size rounded up to DIRBLKSIZ (see /usr/src/lib/libc/sys/getdirentries.c). - oldBufLen := roundup(len(buf)/4, _dirblksiz) - oldBuf := make([]byte, oldBufLen) - n, err = getdirentries(fd, oldBuf, basep) - if err == nil && n > 0 { - n = convertFromDirents11(buf, oldBuf[:n]) + // The syscall needs a 64-bit base. On 32-bit machines + // we can't just use the basep passed in. See #32498. + var base uint64 = uint64(*basep) + n, err = getdirentries(fd, buf, &base) + *basep = uintptr(base) + if base>>32 != 0 { + // We can't stuff the base back into a uintptr, so any + // future calls would be suspect. Generate an error. + // EIO is allowed by getdirentries. + err = EIO } return } func Mknod(path string, mode uint32, dev uint64) (err error) { - var oldDev int - if supportsABI(_ino64First) { - return mknodat_freebsd12(AT_FDCWD, path, mode, dev) - } - oldDev = int(dev) - return mknod(path, mode, oldDev) -} - -func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { - var oldDev int - if supportsABI(_ino64First) { - return mknodat_freebsd12(fd, path, mode, dev) - } - oldDev = int(dev) - return mknodat(fd, path, mode, oldDev) -} - -// round x to the nearest multiple of y, larger or equal to x. -// -// from /usr/include/sys/param.h Macros for counting and rounding. 
-// #define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) -func roundup(x, y int) int { - return ((x + y - 1) / y) * y -} - -func (s *Stat_t) convertFrom(old *stat_freebsd11_t) { - *s = Stat_t{ - Dev: uint64(old.Dev), - Ino: uint64(old.Ino), - Nlink: uint64(old.Nlink), - Mode: old.Mode, - Uid: old.Uid, - Gid: old.Gid, - Rdev: uint64(old.Rdev), - Atim: old.Atim, - Mtim: old.Mtim, - Ctim: old.Ctim, - Btim: old.Btim, - Size: old.Size, - Blocks: old.Blocks, - Blksize: old.Blksize, - Flags: old.Flags, - Gen: uint64(old.Gen), - } -} - -func (s *Statfs_t) convertFrom(old *statfs_freebsd11_t) { - *s = Statfs_t{ - Version: _statfsVersion, - Type: old.Type, - Flags: old.Flags, - Bsize: old.Bsize, - Iosize: old.Iosize, - Blocks: old.Blocks, - Bfree: old.Bfree, - Bavail: old.Bavail, - Files: old.Files, - Ffree: old.Ffree, - Syncwrites: old.Syncwrites, - Asyncwrites: old.Asyncwrites, - Syncreads: old.Syncreads, - Asyncreads: old.Asyncreads, - // Spare - Namemax: old.Namemax, - Owner: old.Owner, - Fsid: old.Fsid, - // Charspare - // Fstypename - // Mntfromname - // Mntonname - } - - sl := old.Fstypename[:] - n := clen(*(*[]byte)(unsafe.Pointer(&sl))) - copy(s.Fstypename[:], old.Fstypename[:n]) - - sl = old.Mntfromname[:] - n = clen(*(*[]byte)(unsafe.Pointer(&sl))) - copy(s.Mntfromname[:], old.Mntfromname[:n]) - - sl = old.Mntonname[:] - n = clen(*(*[]byte)(unsafe.Pointer(&sl))) - copy(s.Mntonname[:], old.Mntonname[:n]) -} - -func convertFromDirents11(buf []byte, old []byte) int { - const ( - fixedSize = int(unsafe.Offsetof(Dirent{}.Name)) - oldFixedSize = int(unsafe.Offsetof(dirent_freebsd11{}.Name)) - ) - - dstPos := 0 - srcPos := 0 - for dstPos+fixedSize < len(buf) && srcPos+oldFixedSize < len(old) { - var dstDirent Dirent - var srcDirent dirent_freebsd11 - - // If multiple direntries are written, sometimes when we reach the final one, - // we may have cap of old less than size of dirent_freebsd11. 
- copy((*[unsafe.Sizeof(srcDirent)]byte)(unsafe.Pointer(&srcDirent))[:], old[srcPos:]) - - reclen := roundup(fixedSize+int(srcDirent.Namlen)+1, 8) - if dstPos+reclen > len(buf) { - break - } - - dstDirent.Fileno = uint64(srcDirent.Fileno) - dstDirent.Off = 0 - dstDirent.Reclen = uint16(reclen) - dstDirent.Type = srcDirent.Type - dstDirent.Pad0 = 0 - dstDirent.Namlen = uint16(srcDirent.Namlen) - dstDirent.Pad1 = 0 - - copy(dstDirent.Name[:], srcDirent.Name[:srcDirent.Namlen]) - copy(buf[dstPos:], (*[unsafe.Sizeof(dstDirent)]byte)(unsafe.Pointer(&dstDirent))[:]) - padding := buf[dstPos+fixedSize+int(dstDirent.Namlen) : dstPos+reclen] - for i := range padding { - padding[i] = 0 - } - - dstPos += int(dstDirent.Reclen) - srcPos += int(srcDirent.Reclen) - } - - return dstPos + return Mknodat(AT_FDCWD, path, mode, dev) } func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { @@ -501,31 +255,31 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys ptrace(request int, pid int, addr uintptr, data int) (err error) func PtraceAttach(pid int) (err error) { - return ptrace(PTRACE_ATTACH, pid, 0, 0) + return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceCont(pid int, signal int) (err error) { - return ptrace(PTRACE_CONT, pid, 1, signal) + return ptrace(PT_CONTINUE, pid, 1, signal) } func PtraceDetach(pid int) (err error) { - return ptrace(PTRACE_DETACH, pid, 1, 0) + return ptrace(PT_DETACH, pid, 1, 0) } func PtraceGetFpRegs(pid int, fpregsout *FpReg) (err error) { - return ptrace(PTRACE_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0) + return ptrace(PT_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0) } func PtraceGetRegs(pid int, regsout *Reg) (err error) { - return ptrace(PTRACE_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0) + return ptrace(PT_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0) } func PtraceLwpEvents(pid int, enable int) (err error) { - return ptrace(PTRACE_LWPEVENTS, pid, 0, enable) + return ptrace(PT_LWP_EVENTS, pid, 0, enable) } func PtraceLwpInfo(pid int, info uintptr) (err error) { - return ptrace(PTRACE_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{}))) + return ptrace(PT_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{}))) } func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) { @@ -545,11 +299,11 @@ func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) { } func PtraceSetRegs(pid int, regs *Reg) (err error) { - return ptrace(PTRACE_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0) + return ptrace(PT_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0) } func PtraceSingleStep(pid int) (err error) { - return ptrace(PTRACE_SINGLESTEP, pid, 1, 0) + return ptrace(PT_STEP, pid, 1, 0) } /* @@ -591,16 +345,12 @@ func PtraceSingleStep(pid int) (err error) { //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) -//sys fstat(fd int, stat *stat_freebsd11_t) (err error) -//sys fstat_freebsd12(fd int, stat *Stat_t) (err error) -//sys fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) -//sys fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) -//sys fstatfs(fd int, stat *statfs_freebsd11_t) (err error) -//sys fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) 
+//sys Fstatfs(fd int, stat *Statfs_t) (err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) -//sys getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) -//sys getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) +//sys getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) //sys Getdtablesize() (size int) //sysnb Getegid() (egid int) //sysnb Geteuid() (uid int) @@ -622,13 +372,10 @@ func PtraceSingleStep(pid int) (err error) { //sys Link(path string, link string) (err error) //sys Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) //sys Listen(s int, backlog int) (err error) -//sys lstat(path string, stat *stat_freebsd11_t) (err error) //sys Mkdir(path string, mode uint32) (err error) //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) -//sys mknod(path string, mode uint32, dev int) (err error) -//sys mknodat(fd int, path string, mode uint32, dev int) (err error) -//sys mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) +//sys Mknodat(fd int, path string, mode uint32, dev uint64) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(fdat int, path string, mode int, perm uint32) (fd int, err error) @@ -658,9 +405,7 @@ func PtraceSingleStep(pid int) (err error) { //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) -//sys stat(path string, stat *stat_freebsd11_t) (err error) -//sys statfs(path string, stat *statfs_freebsd11_t) (err error) -//sys statfs_freebsd12(path string, stat *Statfs_t) (err error) +//sys Statfs(path string, stat *Statfs_t) (err error) //sys Symlink(path string, link string) (err error) //sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) //sys Sync() (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 342fc32b1..c3c4c698e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -57,11 +57,11 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) + return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) } func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index a32d5aa4a..82be61a2f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -57,11 +57,11 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err 
syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) + return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) } func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 1e36d39ab..cd58f1026 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -58,6 +58,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index a09a1537b..d6f538f9e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -58,6 +58,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go new file mode 100644 index 000000000..8ea6e9610 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -0,0 +1,63 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build riscv64 && freebsd +// +build riscv64,freebsd + +package unix + +import ( + "syscall" + "unsafe" +) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var writtenOut uint64 = 0 + _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) + + written = int(writtenOut) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) + +func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)} + err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + return int(ioDesc.Len), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go index 8d5f294c4..e48244a9c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -20,10 +20,9 @@ func bytes2iovec(bs [][]byte) []Iovec { for i, b := range bs { iovecs[i].SetLen(len(b)) if len(b) > 0 { - // somehow Iovec.Base on illumos is (*int8), not (*byte) - iovecs[i].Base = (*int8)(unsafe.Pointer(&b[0])) + iovecs[i].Base = &b[0] } else { - iovecs[i].Base = (*int8)(unsafe.Pointer(&_zero)) + iovecs[i].Base = (*byte)(unsafe.Pointer(&_zero)) } } return iovecs diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index bda98498b..ecb0f27fb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -512,24 +512,24 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) { // // Server example: // -// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) -// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{ -// Channel: 1, -// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00 -// }) -// _ = Listen(fd, 1) -// nfd, sa, _ := Accept(fd) -// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd) -// Read(nfd, buf) +// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) +// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{ +// Channel: 1, +// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00 +// }) +// _ = Listen(fd, 1) +// nfd, sa, _ := Accept(fd) +// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd) +// Read(nfd, buf) // // Client example: // -// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) -// _ = Connect(fd, &SockaddrRFCOMM{ -// Channel: 1, -// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11 -// }) -// Write(fd, []byte(`hello`)) +// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) +// _ = 
Connect(fd, &SockaddrRFCOMM{ +// Channel: 1, +// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11 +// }) +// Write(fd, []byte(`hello`)) type SockaddrRFCOMM struct { // Addr represents a bluetooth address, byte ordering is little-endian. Addr [6]uint8 @@ -556,12 +556,12 @@ func (sa *SockaddrRFCOMM) sockaddr() (unsafe.Pointer, _Socklen, error) { // The SockaddrCAN struct must be bound to the socket file descriptor // using Bind before the CAN socket can be used. // -// // Read one raw CAN frame -// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW) -// addr := &SockaddrCAN{Ifindex: index} -// Bind(fd, addr) -// frame := make([]byte, 16) -// Read(fd, frame) +// // Read one raw CAN frame +// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW) +// addr := &SockaddrCAN{Ifindex: index} +// Bind(fd, addr) +// frame := make([]byte, 16) +// Read(fd, frame) // // The full SocketCAN documentation can be found in the linux kernel // archives at: https://www.kernel.org/doc/Documentation/networking/can.txt @@ -632,13 +632,13 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) { // Here is an example of using an AF_ALG socket with SHA1 hashing. // The initial socket setup process is as follows: // -// // Open a socket to perform SHA1 hashing. -// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0) -// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"} -// unix.Bind(fd, addr) -// // Note: unix.Accept does not work at this time; must invoke accept() -// // manually using unix.Syscall. -// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0) +// // Open a socket to perform SHA1 hashing. +// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0) +// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"} +// unix.Bind(fd, addr) +// // Note: unix.Accept does not work at this time; must invoke accept() +// // manually using unix.Syscall. +// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0) // // Once a file descriptor has been returned from Accept, it may be used to // perform SHA1 hashing. The descriptor is not safe for concurrent use, but @@ -647,39 +647,39 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) { // When hashing a small byte slice or string, a single Write and Read may // be used: // -// // Assume hashfd is already configured using the setup process. -// hash := os.NewFile(hashfd, "sha1") -// // Hash an input string and read the results. Each Write discards -// // previous hash state. Read always reads the current state. -// b := make([]byte, 20) -// for i := 0; i < 2; i++ { -// io.WriteString(hash, "Hello, world.") -// hash.Read(b) -// fmt.Println(hex.EncodeToString(b)) -// } -// // Output: -// // 2ae01472317d1935a84797ec1983ae243fc6aa28 -// // 2ae01472317d1935a84797ec1983ae243fc6aa28 +// // Assume hashfd is already configured using the setup process. +// hash := os.NewFile(hashfd, "sha1") +// // Hash an input string and read the results. Each Write discards +// // previous hash state. Read always reads the current state. 
+// b := make([]byte, 20) +// for i := 0; i < 2; i++ { +// io.WriteString(hash, "Hello, world.") +// hash.Read(b) +// fmt.Println(hex.EncodeToString(b)) +// } +// // Output: +// // 2ae01472317d1935a84797ec1983ae243fc6aa28 +// // 2ae01472317d1935a84797ec1983ae243fc6aa28 // // For hashing larger byte slices, or byte streams such as those read from // a file or socket, use Sendto with MSG_MORE to instruct the kernel to update // the hash digest instead of creating a new one for a given chunk and finalizing it. // -// // Assume hashfd and addr are already configured using the setup process. -// hash := os.NewFile(hashfd, "sha1") -// // Hash the contents of a file. -// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz") -// b := make([]byte, 4096) -// for { -// n, err := f.Read(b) -// if err == io.EOF { -// break -// } -// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr) -// } -// hash.Read(b) -// fmt.Println(hex.EncodeToString(b)) -// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5 +// // Assume hashfd and addr are already configured using the setup process. +// hash := os.NewFile(hashfd, "sha1") +// // Hash the contents of a file. +// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz") +// b := make([]byte, 4096) +// for { +// n, err := f.Read(b) +// if err == io.EOF { +// break +// } +// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr) +// } +// hash.Read(b) +// fmt.Println(hex.EncodeToString(b)) +// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5 // // For more information, see: http://www.chronox.de/crypto-API/crypto/userspace-if.html. type SockaddrALG struct { @@ -1499,18 +1499,13 @@ func KeyctlRestrictKeyring(ringid int, keyType string, restriction string) error //sys keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) = SYS_KEYCTL //sys keyctlRestrictKeyring(cmd int, arg2 int) (err error) = SYS_KEYCTL -func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { +func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(rsa)) msg.Namelen = uint32(SizeofSockaddrAny) - var iov Iovec - if len(p) > 0 { - iov.Base = &p[0] - iov.SetLen(len(p)) - } var dummy byte if len(oob) > 0 { - if len(p) == 0 { + if emptyIovecs(iov) { var sockType int sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) if err != nil { @@ -1518,15 +1513,19 @@ func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn } // receive at least one normal byte if sockType != SOCK_DGRAM { - iov.Base = &dummy - iov.SetLen(1) + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } } msg.Control = &oob[0] msg.SetControllen(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = recvmsg(fd, &msg, flags); err != nil { return } @@ -1535,18 +1534,15 @@ func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn return } -func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { +func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { var msg Msghdr msg.Name = (*byte)(ptr) msg.Namelen = uint32(salen) - var iov Iovec - if len(p) > 0 { - iov.Base = &p[0] - iov.SetLen(len(p)) - } var dummy byte + var empty bool if len(oob) > 0 { - if len(p) == 0 { + empty = emptyIovecs(iov) + if empty { var 
sockType int sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) if err != nil { @@ -1554,19 +1550,22 @@ func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags i } // send at least one normal byte if sockType != SOCK_DGRAM { - iov.Base = &dummy - iov.SetLen(1) + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) } } msg.Control = &oob[0] msg.SetControllen(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = sendmsg(fd, &msg, flags); err != nil { return 0, err } - if len(oob) > 0 && len(p) == 0 { + if len(oob) > 0 && empty { n = 0 } return n, nil @@ -1829,6 +1828,9 @@ func Dup2(oldfd, newfd int) error { //sys Fremovexattr(fd int, attr string) (err error) //sys Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) //sys Fsync(fd int) (err error) +//sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) +//sys Fsopen(fsName string, flags int) (fd int, err error) +//sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) //sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) @@ -1859,7 +1861,9 @@ func Getpgrp() (pid int) { //sys MemfdCreate(name string, flags int) (fd int, err error) //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) +//sys MoveMount(fromDirfd int, fromPathName string, toDirfd int, toPathName string, flags int) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) +//sys OpenTree(dfd int, fileName string, flags uint) (r int, err error) //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT //sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 @@ -2184,7 +2188,7 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { gid = Getgid() } - if uint32(gid) == st.Gid || isGroupMember(gid) { + if uint32(gid) == st.Gid || isGroupMember(int(st.Gid)) { fmode = (st.Mode >> 3) & 7 } else { fmode = st.Mode & 7 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index b945ab254..f5e9d6bef 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -28,6 +28,7 @@ func Lstat(path string, stat *Stat_t) (err error) { return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW) } +//sys MemfdSecret(flags int) (fd int, err error) //sys Pause() (err error) //sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 81db4833a..d83e2c657 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -22,6 +22,7 @@ import "unsafe" //sysnb getrlimit(resource int, rlim *Rlimit) (err error) //sysnb Getuid() (uid int) //sys Listen(s int, n int) (err error) +//sys MemfdSecret(flags int) (fd int, err error) //sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath 
string, newdirfd int, newpath string) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go new file mode 100644 index 000000000..0b69c3eff --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -0,0 +1,226 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 && linux +// +build loong64,linux + +package unix + +import "unsafe" + +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fstatfs(fd int, buf *Statfs_t) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (euid int) +//sysnb Getgid() (gid int) +//sysnb Getuid() (uid int) +//sys Listen(s int, n int) (err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + var ts *Timespec + if timeout != nil { + ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} + } + return Pselect(nfd, r, w, e, ts, nil) +} + +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setresgid(rgid int, egid int, sgid int) (err error) +//sysnb Setresuid(ruid int, euid int, suid int) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) + +func timespecFromStatxTimestamp(x StatxTimestamp) Timespec { + return Timespec{ + Sec: x.Sec, + Nsec: int64(x.Nsec), + } +} + +func Fstatat(fd int, path string, stat *Stat_t, flags int) error { + var r Statx_t + // Do it the glibc way, add AT_NO_AUTOMOUNT. + if err := Statx(fd, path, AT_NO_AUTOMOUNT|flags, STATX_BASIC_STATS, &r); err != nil { + return err + } + + stat.Dev = Mkdev(r.Dev_major, r.Dev_minor) + stat.Ino = r.Ino + stat.Mode = uint32(r.Mode) + stat.Nlink = r.Nlink + stat.Uid = r.Uid + stat.Gid = r.Gid + stat.Rdev = Mkdev(r.Rdev_major, r.Rdev_minor) + // hope we don't get to process files so large to overflow these size + // fields... 
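+ // (Statx_t reports Size, Blksize and Blocks as unsigned values, while the
+ // corresponding Stat_t fields are signed; the conversions below assume
+ // realistic file sizes that stay within the signed ranges.)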
+ stat.Size = int64(r.Size) + stat.Blksize = int32(r.Blksize) + stat.Blocks = int64(r.Blocks) + stat.Atim = timespecFromStatxTimestamp(r.Atime) + stat.Mtim = timespecFromStatxTimestamp(r.Mtime) + stat.Ctim = timespecFromStatxTimestamp(r.Ctime) + + return nil +} + +func Fstat(fd int, stat *Stat_t) (err error) { + return Fstatat(fd, "", stat, AT_EMPTY_PATH) +} + +func Stat(path string, stat *Stat_t) (err error) { + return Fstatat(AT_FDCWD, path, stat, 0) +} + +func Lchown(path string, uid int, gid int) (err error) { + return Fchownat(AT_FDCWD, path, uid, gid, AT_SYMLINK_NOFOLLOW) +} + +func Lstat(path string, stat *Stat_t) (err error) { + return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW) +} + +//sys Statfs(path string, buf *Statfs_t) (err error) +//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) +//sys Truncate(path string, length int64) (err error) + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + return ENOSYS +} + +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) + +//sysnb Gettimeofday(tv *Timeval) (err error) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + err = Prlimit(0, resource, nil, rlim) + return +} + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + err = Prlimit(0, resource, rlim, nil) + return +} + +func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) { + if tv == nil { + return utimensat(dirfd, path, nil, 0) + } + + ts := []Timespec{ + NsecToTimespec(TimevalToNsec(tv[0])), + NsecToTimespec(TimevalToNsec(tv[1])), + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func Time(t *Time_t) (Time_t, error) { + var tv Timeval + err := Gettimeofday(&tv) + if err != nil { + return 0, err + } + if t != nil { + *t = Time_t(tv.Sec) + } + return Time_t(tv.Sec), nil +} + +func Utime(path string, buf *Utimbuf) error { + tv := []Timeval{ + {Sec: buf.Actime}, + {Sec: buf.Modtime}, + } + return Utimes(path, tv) +} + +func utimes(path string, tv *[2]Timeval) (err error) { + if tv == nil { + return utimensat(AT_FDCWD, path, nil, 0) + } + + ts := []Timespec{ + NsecToTimespec(TimevalToNsec(tv[0])), + 
NsecToTimespec(TimevalToNsec(tv[1])), + } + return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func (r *PtraceRegs) PC() uint64 { return r.Era } + +func (r *PtraceRegs) SetPC(era uint64) { r.Era = era } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint64(length) +} + +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint64(length) +} + +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + +func Pause() error { + _, err := ppoll(nil, 0, nil, nil) + return err +} + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + return Renameat2(olddirfd, oldpath, newdirfd, newpath, 0) +} + +//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) + +func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { + cmdlineLen := len(cmdline) + if cmdlineLen > 0 { + // Account for the additional NULL byte added by + // BytePtrFromString in kexecFileLoad. The kexec_file_load + // syscall expects a NULL-terminated string. + cmdlineLen++ + } + return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 8ff7adba0..925a748a3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -22,6 +22,7 @@ import "unsafe" //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) //sysnb Getuid() (uid int) //sys Listen(s int, n int) (err error) +//sys MemfdSecret(flags int) (fd int, err error) //sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 15d637d63..78daceb33 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -81,6 +81,7 @@ func Pipe(p []int) (err error) { } //sysnb pipe2(p *[2]_C_int, flags int) (err error) + func Pipe2(p []int, flags int) error { if len(p) != 2 { return EINVAL @@ -95,6 +96,7 @@ func Pipe2(p []int, flags int) error { } //sys Getdents(fd int, buf []byte) (n int, err error) + func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { n, err = Getdents(fd, buf) if err != nil || basep == nil { diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go index 30f285343..1378489f8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go @@ -26,6 +26,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) }
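The new linux/loong64 port above is notable in that the architecture ships no legacy stat(2)/fstat(2)/lstat(2) syscalls, so its Fstatat synthesizes a Stat_t from statx(2), and Stat, Lstat and Fstat all funnel through that one wrapper. A minimal caller-side sketch, assuming a Unix-like target; the path is an illustrative assumption, and every port exposes the same unix.Stat signature, only the underlying syscall differs:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var st unix.Stat_t
	// On linux/loong64 this is served by the statx-based Fstatat shown
	// above; other ports fill Stat_t from the native stat family.
	if err := unix.Stat("/etc/hostname", &st); err != nil { // example path
		panic(err)
	}
	fmt.Printf("size=%d mode=%#o mtime=%d.%09d\n",
		st.Size, st.Mode, st.Mtim.Sec, st.Mtim.Nsec)
}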
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 5c2003cec..b5ec457cd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -451,26 +451,25 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.__xnet_recvmsg -func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { +func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(rsa)) msg.Namelen = uint32(SizeofSockaddrAny) - var iov Iovec - if len(p) > 0 { - iov.Base = (*int8)(unsafe.Pointer(&p[0])) - iov.SetLen(len(p)) - } - var dummy int8 + var dummy byte if len(oob) > 0 { // receive at least one normal byte - if len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + if emptyIovecs(iov) { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } msg.Accrightslen = int32(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = recvmsg(fd, &msg, flags); n == -1 { return } @@ -480,30 +479,31 @@ func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.__xnet_sendmsg -func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { +func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(ptr)) msg.Namelen = uint32(salen) - var iov Iovec - if len(p) > 0 { - iov.Base = (*int8)(unsafe.Pointer(&p[0])) - iov.SetLen(len(p)) - } - var dummy int8 + var dummy byte + var empty bool if len(oob) > 0 { // send at least one normal byte - if len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + empty = emptyIovecs(iov) + if empty { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } msg.Accrightslen = int32(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = sendmsg(fd, &msg, flags); err != nil { return 0, err } - if len(oob) > 0 && len(p) == 0 { + if len(oob) > 0 && empty { n = 0 } return n, nil @@ -618,6 +618,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Getpriority(which int, who int) (n int, err error) //sysnb Getrlimit(which int, lim *Rlimit) (err error) //sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Getsid(pid int) (sid int, err error) //sysnb Gettimeofday(tv *Timeval) (err error) //sysnb Getuid() (uid int) //sys Kill(pid int, signum syscall.Signal) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 70508afc1..1ff5060b5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -338,8 +338,13 @@ func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { } func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + var iov [1]Iovec + if len(p) > 0 { + iov[0].Base = &p[0] + iov[0].SetLen(len(p)) + } var rsa RawSockaddrAny - n, oobn, recvflags, err = recvmsgRaw(fd, p, oob, flags, &rsa) + n, oobn, recvflags, err = recvmsgRaw(fd, iov[:], oob, flags, &rsa) // source address is only specified if the socket is unconnected if rsa.Addr.Family != AF_UNSPEC { from, err = anyToSockaddr(fd, &rsa) @@ -347,12
+352,42 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from return } +// RecvmsgBuffers receives a message from a socket using the recvmsg +// system call. The flags are passed to recvmsg. Any non-control data +// read is scattered into the buffers slices. The results are: +// - n is the number of non-control data read into bufs +// - oobn is the number of control data read into oob; this may be interpreted using [ParseSocketControlMessage] +// - recvflags is flags returned by recvmsg +// - from is the address of the sender +func RecvmsgBuffers(fd int, buffers [][]byte, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + iov := make([]Iovec, len(buffers)) + for i := range buffers { + if len(buffers[i]) > 0 { + iov[i].Base = &buffers[i][0] + iov[i].SetLen(len(buffers[i])) + } else { + iov[i].Base = (*byte)(unsafe.Pointer(&_zero)) + } + } + var rsa RawSockaddrAny + n, oobn, recvflags, err = recvmsgRaw(fd, iov, oob, flags, &rsa) + if err == nil && rsa.Addr.Family != AF_UNSPEC { + from, err = anyToSockaddr(fd, &rsa) + } + return +} + func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { _, err = SendmsgN(fd, p, oob, to, flags) return } func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { + var iov [1]Iovec + if len(p) > 0 { + iov[0].Base = &p[0] + iov[0].SetLen(len(p)) + } var ptr unsafe.Pointer var salen _Socklen if to != nil { @@ -361,7 +396,32 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) return 0, err } } - return sendmsgN(fd, p, oob, ptr, salen, flags) + return sendmsgN(fd, iov[:], oob, ptr, salen, flags) +} + +// SendmsgBuffers sends a message on a socket to an address using the sendmsg +// system call. The flags are passed to sendmsg. Any non-control data written +// is gathered from buffers. The function returns the number of bytes written +// to the socket. +func SendmsgBuffers(fd int, buffers [][]byte, oob []byte, to Sockaddr, flags int) (n int, err error) { + iov := make([]Iovec, len(buffers)) + for i := range buffers { + if len(buffers[i]) > 0 { + iov[i].Base = &buffers[i][0] + iov[i].SetLen(len(buffers[i])) + } else { + iov[i].Base = (*byte)(unsafe.Pointer(&_zero)) + } + } + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + ptr, salen, err = to.sockaddr() + if err != nil { + return 0, err + } + } + return sendmsgN(fd, iov, oob, ptr, salen, flags) } func Send(s int, buf []byte, flags int) (err error) { @@ -484,3 +544,13 @@ func Lutimes(path string, tv []Timeval) error { } return UtimesNanoAt(AT_FDCWD, path, ts, AT_SYMLINK_NOFOLLOW) } +
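The RecvmsgBuffers/SendmsgBuffers pair added above exposes sendmsg(2)/recvmsg(2) scatter-gather directly: each element of the [][]byte argument becomes one Iovec. A minimal sketch of the round trip, assuming a Unix-like target; the socketpair, payload, and buffer sizes are illustrative assumptions, not part of the change:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// A connected datagram pair keeps the example self-contained.
	fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_DGRAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fds[0])
	defer unix.Close(fds[1])

	// Gather: one datagram assembled from two separate buffers.
	if _, err := unix.SendmsgBuffers(fds[0],
		[][]byte{[]byte("hello "), []byte("world")}, nil, nil, 0); err != nil {
		panic(err)
	}

	// Scatter: the kernel fills the receive buffers in order.
	a, b := make([]byte, 6), make([]byte, 5)
	n, _, _, _, err := unix.RecvmsgBuffers(fds[1], [][]byte{a, b}, nil, 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes: %q %q\n", n, a, b) // read 11 bytes: "hello " "world"
}

The solaris hunk above keeps the pre-existing dummy-byte behavior for this path: when control data is sent with no normal data, a single placeholder byte is substituted, which is why sendmsgN reports n = 0 in that case.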
+// emptyIovecs reports whether there are no bytes in the slice of Iovec. +func emptyIovecs(iov []Iovec) bool { + for i := range iov { + if iov[i].Len > 0 { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 440900112..f8c2c5138 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -151,6 +151,7 @@ const ( BIOCSETF = 0x80084267 BIOCSETFNR = 0x80084282 BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 BIOCSETWF = 0x8008427b BIOCSETZBUF = 0x800c4281 BIOCSHDRCMPLT = 0x80044275 @@ -447,7 +448,7 @@ const ( DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 + DLT_IPMB_KONTRON = 0xc7 DLT_IPMB_LINUX = 0xd1 DLT_IPMI_HPM_2 = 0x104 DLT_IPNET = 0xe2 @@ -487,10 +488,11 @@ const ( DLT_LINUX_LAPD = 0xb1 DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 DLT_LOOP = 0x6c DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x113 + DLT_MATCHING_MAX = 0x114 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -734,6 +736,7 @@ const ( IPPROTO_CMTP = 0x26 IPPROTO_CPHB = 0x49 IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 IPPROTO_DDP = 0x25 IPPROTO_DGP = 0x56 IPPROTO_DIVERT = 0x102 @@ -814,7 +817,6 @@ const ( IPPROTO_SCTP = 0x84 IPPROTO_SDRP = 0x2a IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 IPPROTO_SHIM6 = 0x8c IPPROTO_SKIP = 0x39 IPPROTO_SPACER = 0x7fff @@ -911,6 +913,7 @@ const ( IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b IP_ADD_MEMBERSHIP = 0xc IP_ADD_SOURCE_MEMBERSHIP = 0x46 IP_BINDANY = 0x18 @@ -989,8 +992,12 @@ const ( IP_TOS = 0x3 IP_TTL = 0x4 IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -1000,7 +1007,6 @@ const ( KERN_VERSION = 0x4 LOCAL_CONNWAIT = 0x4 LOCAL_CREDS = 0x2 - LOCAL_CREDS_PERSISTENT = 0x3 LOCAL_PEERCRED = 0x1 LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 @@ -1179,6 +1185,8 @@ const ( O_NONBLOCK = 0x4 O_RDONLY = 0x0 O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 O_SHLOCK = 0x10 O_SYNC = 0x80 O_TRUNC = 0x400 @@ -1189,6 +1197,10 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1196,6 +1208,60 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETFSBASE = 0x47 + PT_GETGSBASE = 0x49 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GETXMMREGS = 0x40 + PT_GETXSTATE = 0x45 + PT_GETXSTATE_INFO = 0x44 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETFSBASE = 0x48 + PT_SETGSBASE = 0x4a + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SETXMMREGS = 0x41 + PT_SETXSTATE = 0x46 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 +
PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc RLIMIT_AS = 0xa RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1320,10 +1386,12 @@ const ( SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0086924 SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFIB = 0xc020695c SIOCGIFFLAGS = 0xc0206911 @@ -1414,6 +1482,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_REUSEPORT_LB = 0x10000 @@ -1472,22 +1541,40 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b TCP_BBR_DRAIN_INC_EXTRA = 0x43c TCP_BBR_DRAIN_PG = 0x42e TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 TCP_BBR_IWINTSO = 0x42b TCP_BBR_LOWGAIN_FD = 0x436 TCP_BBR_LOWGAIN_HALF = 0x435 TCP_BBR_LOWGAIN_THRESH = 0x434 TCP_BBR_MAX_RTO = 0x439 TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 TCP_BBR_ONE_RETRAN = 0x431 TCP_BBR_PACE_CROSS = 0x442 TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 TCP_BBR_PACE_PER_SEC = 0x43e TCP_BBR_PACE_SEG_MAX = 0x440 TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 TCP_BBR_PROBE_RTT_GAIN = 0x44d TCP_BBR_PROBE_RTT_INT = 0x430 TCP_BBR_PROBE_RTT_LEN = 0x44e @@ -1496,12 +1583,18 @@ const ( TCP_BBR_REC_OVER_HPTS = 0x43a TCP_BBR_RETRAN_WTSO = 0x44b TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d TCP_BBR_STARTUP_LOSS_EXIT = 0x432 TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 TCP_BBR_UNLIMITED = 0x43b TCP_BBR_USEDEL_RATE = 0x437 TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 @@ -1541,6 +1634,7 @@ const ( TCP_PCAP_OUT = 0x800 TCP_RACK_EARLY_RECOV = 0x423 TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_GP_INCREASE = 0x446 TCP_RACK_IDLE_REDUCE_HIGH = 0x444 TCP_RACK_MIN_PACE = 0x445 TCP_RACK_MIN_PACE_SEG = 0x446 @@ -1554,7 +1648,6 @@ const ( TCP_RACK_PRR_SENDALOT = 0x421 TCP_RACK_REORD_FADE = 0x426 TCP_RACK_REORD_THRESH = 0x425 - TCP_RACK_SESS_CWV = 0x42a TCP_RACK_TLP_INC_VAR = 0x429 TCP_RACK_TLP_REDUCE = 0x41c TCP_RACK_TLP_THRESH = 0x427 @@ -1694,12 +1787,13 @@ const ( EIDRM = syscall.Errno(0x52) EILSEQ = syscall.Errno(0x56) EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) EINTR = syscall.Errno(0x4) EINVAL = syscall.Errno(0x16) EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x60) + ELAST = syscall.Errno(0x61) ELOOP = syscall.Errno(0x3e) EMFILE = syscall.Errno(0x18) EMLINK = syscall.Errno(0x1f) @@ -1842,7 +1936,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", 
"operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1904,6 +1998,7 @@ var errorList = [...]struct { {94, "ECAPMODE", "not permitted in capability mode"}, {95, "ENOTRECOVERABLE", "state not recoverable"}, {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index 64520d312..96310c3be 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -151,6 +151,7 @@ const ( BIOCSETF = 0x80104267 BIOCSETFNR = 0x80104282 BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 BIOCSETWF = 0x8010427b BIOCSETZBUF = 0x80184281 BIOCSHDRCMPLT = 0x80044275 @@ -447,7 +448,7 @@ const ( DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 + DLT_IPMB_KONTRON = 0xc7 DLT_IPMB_LINUX = 0xd1 DLT_IPMI_HPM_2 = 0x104 DLT_IPNET = 0xe2 @@ -487,10 +488,11 @@ const ( DLT_LINUX_LAPD = 0xb1 DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 DLT_LOOP = 0x6c DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x113 + DLT_MATCHING_MAX = 0x114 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -734,6 +736,7 @@ const ( IPPROTO_CMTP = 0x26 IPPROTO_CPHB = 0x49 IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 IPPROTO_DDP = 0x25 IPPROTO_DGP = 0x56 IPPROTO_DIVERT = 0x102 @@ -814,7 +817,6 @@ const ( IPPROTO_SCTP = 0x84 IPPROTO_SDRP = 0x2a IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 IPPROTO_SHIM6 = 0x8c IPPROTO_SKIP = 0x39 IPPROTO_SPACER = 0x7fff @@ -911,6 +913,7 @@ const ( IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b IP_ADD_MEMBERSHIP = 0xc IP_ADD_SOURCE_MEMBERSHIP = 0x46 IP_BINDANY = 0x18 @@ -989,8 +992,12 @@ const ( IP_TOS = 0x3 IP_TTL = 0x4 IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -1000,7 +1007,6 @@ const ( KERN_VERSION = 0x4 LOCAL_CONNWAIT = 0x4 LOCAL_CREDS = 0x2 - LOCAL_CREDS_PERSISTENT = 0x3 LOCAL_PEERCRED = 0x1 LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 @@ -1180,6 +1186,8 @@ const ( O_NONBLOCK = 0x4 O_RDONLY = 0x0 O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 O_SHLOCK = 0x10 O_SYNC = 0x80 O_TRUNC = 0x400 @@ -1190,6 +1198,10 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1197,6 +1209,58 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETFSBASE = 0x47 + PT_GETGSBASE = 0x49 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GETXSTATE = 0x45 + PT_GETXSTATE_INFO = 0x44 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETFSBASE = 0x48 + PT_SETGSBASE = 0x4a + PT_SETREGS = 0x22 + 
PT_SETSTEP = 0x11 + PT_SETXSTATE = 0x46 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc RLIMIT_AS = 0xa RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1321,10 +1385,12 @@ const ( SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0106924 SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFIB = 0xc020695c SIOCGIFFLAGS = 0xc0206911 @@ -1415,6 +1481,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_REUSEPORT_LB = 0x10000 @@ -1473,22 +1540,40 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b TCP_BBR_DRAIN_INC_EXTRA = 0x43c TCP_BBR_DRAIN_PG = 0x42e TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 TCP_BBR_IWINTSO = 0x42b TCP_BBR_LOWGAIN_FD = 0x436 TCP_BBR_LOWGAIN_HALF = 0x435 TCP_BBR_LOWGAIN_THRESH = 0x434 TCP_BBR_MAX_RTO = 0x439 TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 TCP_BBR_ONE_RETRAN = 0x431 TCP_BBR_PACE_CROSS = 0x442 TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 TCP_BBR_PACE_PER_SEC = 0x43e TCP_BBR_PACE_SEG_MAX = 0x440 TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 TCP_BBR_PROBE_RTT_GAIN = 0x44d TCP_BBR_PROBE_RTT_INT = 0x430 TCP_BBR_PROBE_RTT_LEN = 0x44e @@ -1497,12 +1582,18 @@ const ( TCP_BBR_REC_OVER_HPTS = 0x43a TCP_BBR_RETRAN_WTSO = 0x44b TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d TCP_BBR_STARTUP_LOSS_EXIT = 0x432 TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 TCP_BBR_UNLIMITED = 0x43b TCP_BBR_USEDEL_RATE = 0x437 TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 @@ -1542,6 +1633,7 @@ const ( TCP_PCAP_OUT = 0x800 TCP_RACK_EARLY_RECOV = 0x423 TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_GP_INCREASE = 0x446 TCP_RACK_IDLE_REDUCE_HIGH = 0x444 TCP_RACK_MIN_PACE = 0x445 TCP_RACK_MIN_PACE_SEG = 0x446 @@ -1555,7 +1647,6 @@ const ( TCP_RACK_PRR_SENDALOT = 0x421 TCP_RACK_REORD_FADE = 0x426 TCP_RACK_REORD_THRESH = 0x425 - TCP_RACK_SESS_CWV = 0x42a TCP_RACK_TLP_INC_VAR = 0x429 TCP_RACK_TLP_REDUCE = 0x41c TCP_RACK_TLP_THRESH = 0x427 @@ -1693,12 +1784,13 @@ const ( EIDRM = syscall.Errno(0x52) EILSEQ = syscall.Errno(0x56) EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) EINTR = syscall.Errno(0x4) EINVAL = syscall.Errno(0x16) EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x60) + ELAST = syscall.Errno(0x61) ELOOP = syscall.Errno(0x3e) EMFILE = syscall.Errno(0x18) EMLINK = syscall.Errno(0x1f) @@ -1841,7 +1933,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - 
{35, "EAGAIN", "resource temporarily unavailable"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1903,6 +1995,7 @@ var errorList = [...]struct { {94, "ECAPMODE", "not permitted in capability mode"}, {95, "ENOTRECOVERABLE", "state not recoverable"}, {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 99e9a0e06..777b69def 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -151,6 +151,7 @@ const ( BIOCSETF = 0x80084267 BIOCSETFNR = 0x80084282 BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 BIOCSETWF = 0x8008427b BIOCSETZBUF = 0x800c4281 BIOCSHDRCMPLT = 0x80044275 @@ -362,7 +363,7 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0x18 CTL_NET = 0x4 - DIOCGATTR = 0xc144648e + DIOCGATTR = 0xc148648e DIOCGDELETE = 0x80106488 DIOCGFLUSH = 0x20006487 DIOCGFRONTSTUFF = 0x40086486 @@ -377,7 +378,7 @@ const ( DIOCGSTRIPESIZE = 0x4008648b DIOCSKERNELDUMP = 0x804c6490 DIOCSKERNELDUMP_FREEBSD11 = 0x80046485 - DIOCZONECMD = 0xc06c648f + DIOCZONECMD = 0xc078648f DLT_A429 = 0xb8 DLT_A653_ICM = 0xb9 DLT_AIRONET_HEADER = 0x78 @@ -407,7 +408,9 @@ const ( DLT_C_HDLC_WITH_DIR = 0xcd DLT_DBUS = 0xe7 DLT_DECT = 0xdd + DLT_DISPLAYPORT_AUX = 0x113 DLT_DOCSIS = 0x8f + DLT_DOCSIS31_XRA31 = 0x111 DLT_DVB_CI = 0xeb DLT_ECONET = 0x73 DLT_EN10MB = 0x1 @@ -417,6 +420,7 @@ const ( DLT_ERF = 0xc5 DLT_ERF_ETH = 0xaf DLT_ERF_POS = 0xb0 + DLT_ETHERNET_MPACKET = 0x112 DLT_FC_2 = 0xe0 DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 DLT_FDDI = 0xa @@ -444,7 +448,7 @@ const ( DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 + DLT_IPMB_KONTRON = 0xc7 DLT_IPMB_LINUX = 0xd1 DLT_IPMI_HPM_2 = 0x104 DLT_IPNET = 0xe2 @@ -484,9 +488,11 @@ const ( DLT_LINUX_LAPD = 0xb1 DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 DLT_LOOP = 0x6c + DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x109 + DLT_MATCHING_MAX = 0x114 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -502,7 +508,9 @@ const ( DLT_NFC_LLCP = 0xf5 DLT_NFLOG = 0xef DLT_NG40 = 0xf4 + DLT_NORDIC_BLE = 0x110 DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b DLT_PCI_EXP = 0x7d DLT_PFLOG = 0x75 DLT_PFSYNC = 0x79 @@ -526,15 +534,18 @@ const ( DLT_RTAC_SERIAL = 0xfa DLT_SCCP = 0x8e DLT_SCTP = 0xf8 + DLT_SDLC = 0x10c DLT_SITA = 0xc4 DLT_SLIP = 0x8 DLT_SLIP_BSDOS = 0xd DLT_STANAG_5066_D_PDU = 0xed DLT_SUNATM = 0x7b DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TI_LLN_SNIFFER = 0x10d DLT_TZSP = 0x80 DLT_USB = 0xba DLT_USBPCAP = 0xf9 + DLT_USB_DARWIN = 0x10a DLT_USB_FREEBSD = 0xba DLT_USB_LINUX = 0xbd DLT_USB_LINUX_MMAPPED = 0xdc @@ -554,6 +565,7 @@ const ( DLT_USER7 = 0x9a DLT_USER8 = 0x9b DLT_USER9 = 0x9c + DLT_VSOCK = 0x10f DLT_WATTSTOPPER_DLM = 0x107 DLT_WIHART = 0xdf DLT_WIRESHARK_UPPER_PDU = 0xfc @@ -578,6 +590,7 @@ const ( ECHONL = 0x10 ECHOPRT = 0x20 EVFILT_AIO = -0x3 + EVFILT_EMPTY = -0xd EVFILT_FS = -0x9 EVFILT_LIO = -0xa EVFILT_PROC = -0x5 @@ -585,11 +598,12 @@ const ( EVFILT_READ = -0x1 EVFILT_SENDFILE = -0xc EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xc + EVFILT_SYSCOUNT = 0xd EVFILT_TIMER = -0x7 EVFILT_USER = -0xb EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 + EVNAMEMAP_NAME_SIZE = 0x40 EV_ADD = 0x1 EV_CLEAR = 
0x20 EV_DELETE = 0x2 @@ -606,6 +620,7 @@ const ( EV_RECEIPT = 0x40 EV_SYSFLAGS = 0xf000 EXTA = 0x4b00 + EXTATTR_MAXNAMELEN = 0xff EXTATTR_NAMESPACE_EMPTY = 0x0 EXTATTR_NAMESPACE_SYSTEM = 0x2 EXTATTR_NAMESPACE_USER = 0x1 @@ -647,6 +662,7 @@ const ( IEXTEN = 0x400 IFAN_ARRIVAL = 0x0 IFAN_DEPARTURE = 0x1 + IFCAP_WOL_MAGIC = 0x2000 IFF_ALLMULTI = 0x200 IFF_ALTPHYS = 0x4000 IFF_BROADCAST = 0x2 @@ -663,6 +679,7 @@ const ( IFF_MONITOR = 0x40000 IFF_MULTICAST = 0x8000 IFF_NOARP = 0x80 + IFF_NOGROUP = 0x800000 IFF_OACTIVE = 0x400 IFF_POINTOPOINT = 0x10 IFF_PPROMISC = 0x20000 @@ -719,6 +736,7 @@ const ( IPPROTO_CMTP = 0x26 IPPROTO_CPHB = 0x49 IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 IPPROTO_DDP = 0x25 IPPROTO_DGP = 0x56 IPPROTO_DIVERT = 0x102 @@ -799,7 +817,6 @@ const ( IPPROTO_SCTP = 0x84 IPPROTO_SDRP = 0x2a IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 IPPROTO_SHIM6 = 0x8c IPPROTO_SKIP = 0x39 IPPROTO_SPACER = 0x7fff @@ -837,6 +854,7 @@ const ( IPV6_DSTOPTS = 0x32 IPV6_FLOWID = 0x43 IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_LEN = 0x14 IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FLOWTYPE = 0x44 IPV6_FRAGTTL = 0x78 @@ -857,13 +875,13 @@ const ( IPV6_MAX_GROUP_SRC_FILTER = 0x200 IPV6_MAX_MEMBERSHIPS = 0xfff IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f IPV6_MMTU = 0x500 IPV6_MSFILTER = 0x4a IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 IPV6_MULTICAST_LOOP = 0xb IPV6_NEXTHOP = 0x30 + IPV6_ORIGDSTADDR = 0x48 IPV6_PATHMTU = 0x2c IPV6_PKTINFO = 0x2e IPV6_PORTRANGE = 0xe @@ -875,6 +893,7 @@ const ( IPV6_RECVFLOWID = 0x46 IPV6_RECVHOPLIMIT = 0x25 IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVORIGDSTADDR = 0x48 IPV6_RECVPATHMTU = 0x2b IPV6_RECVPKTINFO = 0x24 IPV6_RECVRSSBUCKETID = 0x47 @@ -894,6 +913,7 @@ const ( IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b IP_ADD_MEMBERSHIP = 0xc IP_ADD_SOURCE_MEMBERSHIP = 0x46 IP_BINDANY = 0x18 @@ -935,10 +955,8 @@ const ( IP_MAX_MEMBERSHIPS = 0xfff IP_MAX_SOCK_MUTE_FILTER = 0x80 IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MAX_SOURCE_FILTER = 0x400 IP_MF = 0x2000 IP_MINTTL = 0x42 - IP_MIN_MEMBERSHIPS = 0x1f IP_MSFILTER = 0x4a IP_MSS = 0x240 IP_MULTICAST_IF = 0x9 @@ -948,6 +966,7 @@ const ( IP_OFFMASK = 0x1fff IP_ONESBCAST = 0x17 IP_OPTIONS = 0x1 + IP_ORIGDSTADDR = 0x1b IP_PORTRANGE = 0x13 IP_PORTRANGE_DEFAULT = 0x0 IP_PORTRANGE_HIGH = 0x1 @@ -956,6 +975,7 @@ const ( IP_RECVFLOWID = 0x5d IP_RECVIF = 0x14 IP_RECVOPTS = 0x5 + IP_RECVORIGDSTADDR = 0x1b IP_RECVRETOPTS = 0x6 IP_RECVRSSBUCKETID = 0x5e IP_RECVTOS = 0x44 @@ -972,8 +992,12 @@ const ( IP_TOS = 0x3 IP_TTL = 0x4 IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -983,7 +1007,6 @@ const ( KERN_VERSION = 0x4 LOCAL_CONNWAIT = 0x4 LOCAL_CREDS = 0x2 - LOCAL_CREDS_PERSISTENT = 0x3 LOCAL_PEERCRED = 0x1 LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 @@ -1071,10 +1094,12 @@ const ( MNT_SUSPEND = 0x4 MNT_SYNCHRONOUS = 0x2 MNT_UNION = 0x20 + MNT_UNTRUSTED = 0x800000000 MNT_UPDATE = 0x10000 - MNT_UPDATEMASK = 0x2d8d0807e + MNT_UPDATEMASK = 0xad8d0807e MNT_USER = 0x8000 - MNT_VISFLAGMASK = 0x3fef0ffff + MNT_VERIFIED = 0x400000000 + MNT_VISFLAGMASK = 0xffef0ffff MNT_WAIT = 0x1 MSG_CMSG_CLOEXEC = 0x40000 MSG_COMPAT = 0x8000 @@ -1103,6 +1128,7 @@ const ( NFDBITS = 0x20 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 + NOTE_ABSTIME = 0x10 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 NOTE_CLOSE = 0x100 @@ -1159,6 +1185,8 @@ const ( O_NONBLOCK = 0x4 O_RDONLY = 0x0 O_RDWR = 0x2 + 
O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 O_SHLOCK = 0x10 O_SYNC = 0x80 O_TRUNC = 0x400 @@ -1169,6 +1197,10 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1176,6 +1208,53 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GETVFPREGS = 0x40 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SETVFPREGS = 0x41 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc RLIMIT_AS = 0xa RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1257,7 +1336,6 @@ const ( RTV_WEIGHT = 0x100 RT_ALL_FIBS = -0x1 RT_BLACKHOLE = 0x40 - RT_CACHING_CONTEXT = 0x1 RT_DEFAULT_FIB = 0x0 RT_HAS_GW = 0x80 RT_HAS_HEADER = 0x10 @@ -1267,15 +1345,17 @@ const ( RT_LLE_CACHE = 0x100 RT_MAY_LOOP = 0x8 RT_MAY_LOOP_BIT = 0x3 - RT_NORTREF = 0x2 RT_REJECT = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 SCM_BINTIME = 0x4 SCM_CREDS = 0x3 + SCM_MONOTONIC = 0x6 + SCM_REALTIME = 0x5 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 + SCM_TIME_INFO = 0x7 SEEK_CUR = 0x1 SEEK_DATA = 0x3 SEEK_END = 0x2 @@ -1299,10 +1379,12 @@ const ( SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0086924 SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFIB = 0xc020695c SIOCGIFFLAGS = 0xc0206911 @@ -1318,8 +1400,11 @@ const ( SIOCGIFPDSTADDR = 0xc0206948 SIOCGIFPHYS = 0xc0206935 SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRSSHASH = 0xc0186997 + SIOCGIFRSSKEY = 0xc0946996 SIOCGIFSTATUS = 0xc331693b SIOCGIFXMEDIA = 0xc028698b + SIOCGLANPCP = 0xc0206998 SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 SIOCGPRIVATE_0 = 0xc0206950 @@ -1350,6 +1435,7 @@ const ( SIOCSIFPHYS = 0x80206936 SIOCSIFRVNET = 0xc020695b SIOCSIFVNET = 0xc020695a + SIOCSLANPCP = 0x80206999 SIOCSLOWAT = 0x80047302 SIOCSPGRP = 0x80047308 SIOCSTUNFIB = 0x8020695f @@ -1369,6 +1455,7 @@ const ( SO_BINTIME = 0x2000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1019 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1377,6 +1464,7 @@ const ( SO_LISTENINCQLEN = 0x1013 SO_LISTENQLEN = 0x1012 SO_LISTENQLIMIT = 0x1011 + SO_MAX_PACING_RATE = 0x1018 SO_NOSIGPIPE = 0x800 SO_NO_DDP = 0x8000 SO_NO_OFFLOAD = 0x4000 @@ -1387,13 +1475,22 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 + SO_REUSEPORT_LB = 0x10000 SO_SETFIB = 0x1014 SO_SNDBUF = 0x1001 SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 SO_TIMESTAMP = 0x400 + SO_TS_BINTIME = 0x1 + SO_TS_CLOCK = 0x1017 + SO_TS_CLOCK_MAX = 0x3 + 
SO_TS_DEFAULT = 0x0 + SO_TS_MONOTONIC = 0x3 + SO_TS_REALTIME = 0x2 + SO_TS_REALTIME_MICRO = 0x0 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 SO_USER_COOKIE = 0x1015 @@ -1437,10 +1534,69 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 + TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b + TCP_BBR_DRAIN_INC_EXTRA = 0x43c + TCP_BBR_DRAIN_PG = 0x42e + TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 + TCP_BBR_IWINTSO = 0x42b + TCP_BBR_LOWGAIN_FD = 0x436 + TCP_BBR_LOWGAIN_HALF = 0x435 + TCP_BBR_LOWGAIN_THRESH = 0x434 + TCP_BBR_MAX_RTO = 0x439 + TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 + TCP_BBR_ONE_RETRAN = 0x431 + TCP_BBR_PACE_CROSS = 0x442 + TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 + TCP_BBR_PACE_PER_SEC = 0x43e + TCP_BBR_PACE_SEG_MAX = 0x440 + TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 + TCP_BBR_PROBE_RTT_GAIN = 0x44d + TCP_BBR_PROBE_RTT_INT = 0x430 + TCP_BBR_PROBE_RTT_LEN = 0x44e + TCP_BBR_RACK_RTT_USE = 0x44a + TCP_BBR_RECFORCE = 0x42c + TCP_BBR_REC_OVER_HPTS = 0x43a + TCP_BBR_RETRAN_WTSO = 0x44b + TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f + TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d + TCP_BBR_STARTUP_LOSS_EXIT = 0x432 + TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 + TCP_BBR_UNLIMITED = 0x43b + TCP_BBR_USEDEL_RATE = 0x437 + TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 + TCP_DATA_AFTER_CLOSE = 0x44c + TCP_DELACK = 0x48 TCP_FASTOPEN = 0x401 + TCP_FASTOPEN_MAX_COOKIE_LEN = 0x10 + TCP_FASTOPEN_MIN_COOKIE_LEN = 0x4 + TCP_FASTOPEN_PSK_LEN = 0x10 TCP_FUNCTION_BLK = 0x2000 TCP_FUNCTION_NAME_LEN_MAX = 0x20 TCP_INFO = 0x20 @@ -1448,6 +1604,12 @@ const ( TCP_KEEPIDLE = 0x100 TCP_KEEPINIT = 0x80 TCP_KEEPINTVL = 0x200 + TCP_LOG = 0x22 + TCP_LOGBUF = 0x23 + TCP_LOGDUMP = 0x25 + TCP_LOGDUMPID = 0x26 + TCP_LOGID = 0x24 + TCP_LOG_ID_LEN = 0x40 TCP_MAXBURST = 0x4 TCP_MAXHLEN = 0x3c TCP_MAXOLEN = 0x28 @@ -1463,8 +1625,30 @@ const ( TCP_NOPUSH = 0x4 TCP_PCAP_IN = 0x1000 TCP_PCAP_OUT = 0x800 + TCP_RACK_EARLY_RECOV = 0x423 + TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_GP_INCREASE = 0x446 + TCP_RACK_IDLE_REDUCE_HIGH = 0x444 + TCP_RACK_MIN_PACE = 0x445 + TCP_RACK_MIN_PACE_SEG = 0x446 + TCP_RACK_MIN_TO = 0x422 + TCP_RACK_PACE_ALWAYS = 0x41f + TCP_RACK_PACE_MAX_SEG = 0x41e + TCP_RACK_PACE_REDUCE = 0x41d + TCP_RACK_PKT_DELAY = 0x428 + TCP_RACK_PROP = 0x41b + TCP_RACK_PROP_RATE = 0x420 + TCP_RACK_PRR_SENDALOT = 0x421 + TCP_RACK_REORD_FADE = 0x426 + TCP_RACK_REORD_THRESH = 0x425 + TCP_RACK_TLP_INC_VAR = 0x429 + TCP_RACK_TLP_REDUCE = 0x41c + TCP_RACK_TLP_THRESH = 0x427 + TCP_RACK_TLP_USE = 0x447 TCP_VENDOR = 0x80000000 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 TIOCCONS = 0x80047462 @@ -1528,6 +1712,8 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UTIME_NOW = -0x1 + UTIME_OMIT = -0x2 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 @@ -1592,12 +1778,13 @@ const ( EIDRM = syscall.Errno(0x52) EILSEQ = syscall.Errno(0x56) EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = 
syscall.Errno(0x61) EINTR = syscall.Errno(0x4) EINVAL = syscall.Errno(0x16) EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x60) + ELAST = syscall.Errno(0x61) ELOOP = syscall.Errno(0x3e) EMFILE = syscall.Errno(0x18) EMLINK = syscall.Errno(0x1f) @@ -1740,7 +1927,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1802,6 +1989,7 @@ var errorList = [...]struct { {94, "ECAPMODE", "not permitted in capability mode"}, {95, "ENOTRECOVERABLE", "state not recoverable"}, {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index 4c8377114..c557ac2db 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -151,6 +151,7 @@ const ( BIOCSETF = 0x80104267 BIOCSETFNR = 0x80104282 BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 BIOCSETWF = 0x8010427b BIOCSETZBUF = 0x80184281 BIOCSHDRCMPLT = 0x80044275 @@ -447,7 +448,7 @@ const ( DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 + DLT_IPMB_KONTRON = 0xc7 DLT_IPMB_LINUX = 0xd1 DLT_IPMI_HPM_2 = 0x104 DLT_IPNET = 0xe2 @@ -487,10 +488,11 @@ const ( DLT_LINUX_LAPD = 0xb1 DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 DLT_LOOP = 0x6c DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x113 + DLT_MATCHING_MAX = 0x114 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -734,6 +736,7 @@ const ( IPPROTO_CMTP = 0x26 IPPROTO_CPHB = 0x49 IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 IPPROTO_DDP = 0x25 IPPROTO_DGP = 0x56 IPPROTO_DIVERT = 0x102 @@ -814,7 +817,6 @@ const ( IPPROTO_SCTP = 0x84 IPPROTO_SDRP = 0x2a IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 IPPROTO_SHIM6 = 0x8c IPPROTO_SKIP = 0x39 IPPROTO_SPACER = 0x7fff @@ -911,6 +913,7 @@ const ( IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b IP_ADD_MEMBERSHIP = 0xc IP_ADD_SOURCE_MEMBERSHIP = 0x46 IP_BINDANY = 0x18 @@ -989,8 +992,12 @@ const ( IP_TOS = 0x3 IP_TTL = 0x4 IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -1000,7 +1007,6 @@ const ( KERN_VERSION = 0x4 LOCAL_CONNWAIT = 0x4 LOCAL_CREDS = 0x2 - LOCAL_CREDS_PERSISTENT = 0x3 LOCAL_PEERCRED = 0x1 LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 @@ -1180,6 +1186,8 @@ const ( O_NONBLOCK = 0x4 O_RDONLY = 0x0 O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 O_SHLOCK = 0x10 O_SYNC = 0x80 O_TRUNC = 0x400 @@ -1190,6 +1198,10 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1197,6 +1209,51 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + 
PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc RLIMIT_AS = 0xa RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1321,10 +1378,12 @@ const ( SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0106924 SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFIB = 0xc020695c SIOCGIFFLAGS = 0xc0206911 @@ -1415,6 +1474,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_REUSEPORT_LB = 0x10000 @@ -1473,22 +1533,40 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b TCP_BBR_DRAIN_INC_EXTRA = 0x43c TCP_BBR_DRAIN_PG = 0x42e TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 TCP_BBR_IWINTSO = 0x42b TCP_BBR_LOWGAIN_FD = 0x436 TCP_BBR_LOWGAIN_HALF = 0x435 TCP_BBR_LOWGAIN_THRESH = 0x434 TCP_BBR_MAX_RTO = 0x439 TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 TCP_BBR_ONE_RETRAN = 0x431 TCP_BBR_PACE_CROSS = 0x442 TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 TCP_BBR_PACE_PER_SEC = 0x43e TCP_BBR_PACE_SEG_MAX = 0x440 TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 TCP_BBR_PROBE_RTT_GAIN = 0x44d TCP_BBR_PROBE_RTT_INT = 0x430 TCP_BBR_PROBE_RTT_LEN = 0x44e @@ -1497,12 +1575,18 @@ const ( TCP_BBR_REC_OVER_HPTS = 0x43a TCP_BBR_RETRAN_WTSO = 0x44b TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d TCP_BBR_STARTUP_LOSS_EXIT = 0x432 TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 TCP_BBR_UNLIMITED = 0x43b TCP_BBR_USEDEL_RATE = 0x437 TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 @@ -1542,6 +1626,7 @@ const ( TCP_PCAP_OUT = 0x800 TCP_RACK_EARLY_RECOV = 0x423 TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_GP_INCREASE = 0x446 TCP_RACK_IDLE_REDUCE_HIGH = 0x444 TCP_RACK_MIN_PACE = 0x445 TCP_RACK_MIN_PACE_SEG = 0x446 @@ -1555,7 +1640,6 @@ const ( TCP_RACK_PRR_SENDALOT = 0x421 TCP_RACK_REORD_FADE = 0x426 TCP_RACK_REORD_THRESH = 0x425 - TCP_RACK_SESS_CWV = 0x42a TCP_RACK_TLP_INC_VAR = 0x429 TCP_RACK_TLP_REDUCE = 0x41c TCP_RACK_TLP_THRESH = 0x427 @@ -1694,12 +1778,13 @@ const ( EIDRM = syscall.Errno(0x52) EILSEQ = syscall.Errno(0x56) EINPROGRESS = 
syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) EINTR = syscall.Errno(0x4) EINVAL = syscall.Errno(0x16) EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x60) + ELAST = syscall.Errno(0x61) ELOOP = syscall.Errno(0x3e) EMFILE = syscall.Errno(0x18) EMLINK = syscall.Errno(0x1f) @@ -1842,7 +1927,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1904,6 +1989,7 @@ var errorList = [...]struct { {94, "ECAPMODE", "not permitted in capability mode"}, {95, "ENOTRECOVERABLE", "state not recoverable"}, {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go new file mode 100644 index 000000000..341b4d962 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go @@ -0,0 +1,2148 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build riscv64 && freebsd +// +build riscv64,freebsd + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_ARP = 0x23 + AF_ATM = 0x1e + AF_BLUETOOTH = 0x24 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_HYPERV = 0x2b + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1c + AF_INET6_SDP = 0x2a + AF_INET_SDP = 0x28 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x2b + AF_NATM = 0x1d + AF_NETBIOS = 0x6 + AF_NETGRAPH = 0x20 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SCLUSTER = 0x22 + AF_SIP = 0x18 + AF_SLOW = 0x21 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VENDOR00 = 0x27 + AF_VENDOR01 = 0x29 + AF_VENDOR03 = 0x2d + AF_VENDOR04 = 0x2f + AF_VENDOR05 = 0x31 + AF_VENDOR06 = 0x33 + AF_VENDOR07 = 0x35 + AF_VENDOR08 = 0x37 + AF_VENDOR09 = 0x39 + AF_VENDOR10 = 0x3b + AF_VENDOR11 = 0x3d + AF_VENDOR12 = 0x3f + AF_VENDOR13 = 0x41 + AF_VENDOR14 = 0x43 + AF_VENDOR15 = 0x45 + AF_VENDOR16 = 0x47 + AF_VENDOR17 = 0x49 + AF_VENDOR18 = 0x4b + AF_VENDOR19 = 0x4d + AF_VENDOR20 = 0x4f + AF_VENDOR21 = 0x51 + AF_VENDOR22 = 0x53 + AF_VENDOR23 = 0x55 + AF_VENDOR24 = 0x57 + AF_VENDOR25 = 0x59 + AF_VENDOR26 = 0x5b + AF_VENDOR27 = 0x5d + AF_VENDOR28 = 0x5f + AF_VENDOR29 = 0x61 + AF_VENDOR30 = 0x63 + AF_VENDOR31 = 0x65 + AF_VENDOR32 = 0x67 + AF_VENDOR33 = 0x69 + AF_VENDOR34 = 0x6b + AF_VENDOR35 = 0x6d + AF_VENDOR36 = 0x6f + AF_VENDOR37 = 0x71 + AF_VENDOR38 = 0x73 + AF_VENDOR39 = 0x75 + AF_VENDOR40 = 0x77 + AF_VENDOR41 = 0x79 + AF_VENDOR42 = 0x7b + AF_VENDOR43 = 0x7d + AF_VENDOR44 = 0x7f + AF_VENDOR45 = 0x81 + AF_VENDOR46 = 0x83 + AF_VENDOR47 = 0x85 + ALTWERASE = 0x200 + B0 = 0x0 + B1000000 = 0xf4240 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1500000 = 0x16e360 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B2000000 = 0x1e8480 + B230400 = 0x38400 + B2400 
= 0x960 + B2500000 = 0x2625a0 + B28800 = 0x7080 + B300 = 0x12c + B3000000 = 0x2dc6c0 + B3500000 = 0x3567e0 + B38400 = 0x9600 + B4000000 = 0x3d0900 + B460800 = 0x70800 + B4800 = 0x12c0 + B50 = 0x32 + B500000 = 0x7a120 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B921600 = 0xe1000 + B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427c + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRECTION = 0x40044276 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0104279 + BIOCGETBUFMODE = 0x4004427d + BIOCGETIF = 0x4020426b + BIOCGETZMAX = 0x4008427f + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCGTSTAMP = 0x40044283 + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x2000427a + BIOCPROMISC = 0x20004269 + BIOCROTZBUF = 0x40184280 + BIOCSBLEN = 0xc0044266 + BIOCSDIRECTION = 0x80044277 + BIOCSDLT = 0x80044278 + BIOCSETBUFMODE = 0x8004427e + BIOCSETF = 0x80104267 + BIOCSETFNR = 0x80104282 + BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 + BIOCSETWF = 0x8010427b + BIOCSETZBUF = 0x80184281 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8010426d + BIOCSSEESENT = 0x80044277 + BIOCSTSTAMP = 0x80044284 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x8 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_BUFMODE_BUFFER = 0x1 + BPF_BUFMODE_ZBUF = 0x2 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_T_BINTIME = 0x2 + BPF_T_BINTIME_FAST = 0x102 + BPF_T_BINTIME_MONOTONIC = 0x202 + BPF_T_BINTIME_MONOTONIC_FAST = 0x302 + BPF_T_FAST = 0x100 + BPF_T_FLAG_MASK = 0x300 + BPF_T_FORMAT_MASK = 0x3 + BPF_T_MICROTIME = 0x0 + BPF_T_MICROTIME_FAST = 0x100 + BPF_T_MICROTIME_MONOTONIC = 0x200 + BPF_T_MICROTIME_MONOTONIC_FAST = 0x300 + BPF_T_MONOTONIC = 0x200 + BPF_T_MONOTONIC_FAST = 0x300 + BPF_T_NANOTIME = 0x1 + BPF_T_NANOTIME_FAST = 0x101 + BPF_T_NANOTIME_MONOTONIC = 0x201 + BPF_T_NANOTIME_MONOTONIC_FAST = 0x301 + BPF_T_NONE = 0x3 + BPF_T_NORMAL = 0x0 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + CAP_ACCEPT = 0x200000020000000 + CAP_ACL_CHECK = 0x400000000010000 + CAP_ACL_DELETE = 0x400000000020000 + CAP_ACL_GET = 0x400000000040000 + CAP_ACL_SET = 0x400000000080000 + CAP_ALL0 = 0x20007ffffffffff + CAP_ALL1 = 0x4000000001fffff + CAP_BIND = 0x200000040000000 + CAP_BINDAT = 0x200008000000400 + CAP_CHFLAGSAT = 0x200000000001400 + CAP_CONNECT = 0x200000080000000 + CAP_CONNECTAT = 0x200010000000400 + CAP_CREATE = 0x200000000000040 + CAP_EVENT = 0x400000000000020 + CAP_EXTATTR_DELETE = 0x400000000001000 + CAP_EXTATTR_GET = 0x400000000002000 + CAP_EXTATTR_LIST = 0x400000000004000 + CAP_EXTATTR_SET = 0x400000000008000 + CAP_FCHDIR = 0x200000000000800 + CAP_FCHFLAGS = 0x200000000001000 + CAP_FCHMOD = 0x200000000002000 + CAP_FCHMODAT = 0x200000000002400 + CAP_FCHOWN = 0x200000000004000 + CAP_FCHOWNAT = 0x200000000004400 + CAP_FCNTL 
= 0x200000000008000 + CAP_FCNTL_ALL = 0x78 + CAP_FCNTL_GETFL = 0x8 + CAP_FCNTL_GETOWN = 0x20 + CAP_FCNTL_SETFL = 0x10 + CAP_FCNTL_SETOWN = 0x40 + CAP_FEXECVE = 0x200000000000080 + CAP_FLOCK = 0x200000000010000 + CAP_FPATHCONF = 0x200000000020000 + CAP_FSCK = 0x200000000040000 + CAP_FSTAT = 0x200000000080000 + CAP_FSTATAT = 0x200000000080400 + CAP_FSTATFS = 0x200000000100000 + CAP_FSYNC = 0x200000000000100 + CAP_FTRUNCATE = 0x200000000000200 + CAP_FUTIMES = 0x200000000200000 + CAP_FUTIMESAT = 0x200000000200400 + CAP_GETPEERNAME = 0x200000100000000 + CAP_GETSOCKNAME = 0x200000200000000 + CAP_GETSOCKOPT = 0x200000400000000 + CAP_IOCTL = 0x400000000000080 + CAP_IOCTLS_ALL = 0x7fffffffffffffff + CAP_KQUEUE = 0x400000000100040 + CAP_KQUEUE_CHANGE = 0x400000000100000 + CAP_KQUEUE_EVENT = 0x400000000000040 + CAP_LINKAT_SOURCE = 0x200020000000400 + CAP_LINKAT_TARGET = 0x200000000400400 + CAP_LISTEN = 0x200000800000000 + CAP_LOOKUP = 0x200000000000400 + CAP_MAC_GET = 0x400000000000001 + CAP_MAC_SET = 0x400000000000002 + CAP_MKDIRAT = 0x200000000800400 + CAP_MKFIFOAT = 0x200000001000400 + CAP_MKNODAT = 0x200000002000400 + CAP_MMAP = 0x200000000000010 + CAP_MMAP_R = 0x20000000000001d + CAP_MMAP_RW = 0x20000000000001f + CAP_MMAP_RWX = 0x20000000000003f + CAP_MMAP_RX = 0x20000000000003d + CAP_MMAP_W = 0x20000000000001e + CAP_MMAP_WX = 0x20000000000003e + CAP_MMAP_X = 0x20000000000003c + CAP_PDGETPID = 0x400000000000200 + CAP_PDKILL = 0x400000000000800 + CAP_PDWAIT = 0x400000000000400 + CAP_PEELOFF = 0x200001000000000 + CAP_POLL_EVENT = 0x400000000000020 + CAP_PREAD = 0x20000000000000d + CAP_PWRITE = 0x20000000000000e + CAP_READ = 0x200000000000001 + CAP_RECV = 0x200000000000001 + CAP_RENAMEAT_SOURCE = 0x200000004000400 + CAP_RENAMEAT_TARGET = 0x200040000000400 + CAP_RIGHTS_VERSION = 0x0 + CAP_RIGHTS_VERSION_00 = 0x0 + CAP_SEEK = 0x20000000000000c + CAP_SEEK_TELL = 0x200000000000004 + CAP_SEM_GETVALUE = 0x400000000000004 + CAP_SEM_POST = 0x400000000000008 + CAP_SEM_WAIT = 0x400000000000010 + CAP_SEND = 0x200000000000002 + CAP_SETSOCKOPT = 0x200002000000000 + CAP_SHUTDOWN = 0x200004000000000 + CAP_SOCK_CLIENT = 0x200007780000003 + CAP_SOCK_SERVER = 0x200007f60000003 + CAP_SYMLINKAT = 0x200000008000400 + CAP_TTYHOOK = 0x400000000000100 + CAP_UNLINKAT = 0x200000010000400 + CAP_UNUSED0_44 = 0x200080000000000 + CAP_UNUSED0_57 = 0x300000000000000 + CAP_UNUSED1_22 = 0x400000000200000 + CAP_UNUSED1_57 = 0x500000000000000 + CAP_WRITE = 0x200000000000002 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x5 + CLOCK_MONOTONIC = 0x4 + CLOCK_MONOTONIC_COARSE = 0xc + CLOCK_MONOTONIC_FAST = 0xc + CLOCK_MONOTONIC_PRECISE = 0xb + CLOCK_PROCESS_CPUTIME_ID = 0xf + CLOCK_PROF = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_COARSE = 0xa + CLOCK_REALTIME_FAST = 0xa + CLOCK_REALTIME_PRECISE = 0x9 + CLOCK_SECOND = 0xd + CLOCK_THREAD_CPUTIME_ID = 0xe + CLOCK_UPTIME = 0x5 + CLOCK_UPTIME_FAST = 0x8 + CLOCK_UPTIME_PRECISE = 0x7 + CLOCK_VIRTUAL = 0x1 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 + CREAD = 0x800 + CRTSCTS = 0x30000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0x18 + CTL_NET = 0x4 + DIOCGATTR = 0xc148648e + DIOCGDELETE = 0x80106488 + DIOCGFLUSH = 0x20006487 + DIOCGFWHEADS = 0x40046483 + DIOCGFWSECTORS = 0x40046482 + DIOCGIDENT = 0x41006489 + DIOCGKERNELDUMP = 0xc0986492 + DIOCGMEDIASIZE = 0x40086481 + DIOCGPHYSPATH = 
0x4400648d + DIOCGPROVIDERNAME = 0x4400648a + DIOCGSECTORSIZE = 0x40046480 + DIOCGSTRIPEOFFSET = 0x4008648c + DIOCGSTRIPESIZE = 0x4008648b + DIOCSKERNELDUMP = 0x80986491 + DIOCSKERNELDUMP_FREEBSD11 = 0x80046485 + DIOCSKERNELDUMP_FREEBSD12 = 0x80506490 + DIOCZONECMD = 0xc080648f + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_BREDR_BB = 0xff + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_BLUETOOTH_LE_LL = 0xfb + DLT_BLUETOOTH_LE_LL_WITH_PHDR = 0x100 + DLT_BLUETOOTH_LINUX_MONITOR = 0xfe + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_CLASS_NETBSD_RAWAF = 0x2240000 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DISPLAYPORT_AUX = 0x113 + DLT_DOCSIS = 0x8f + DLT_DOCSIS31_XRA31 = 0x111 + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_EPON = 0x103 + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_ETHERNET_MPACKET = 0x112 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_INFINIBAND = 0xf7 + DLT_IPFILTER = 0x74 + DLT_IPMB_KONTRON = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPMI_HPM_2 = 0x104 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_ISO_14443 = 0x108 + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 + DLT_LOOP = 0x6c + DLT_LORATAP = 0x10e + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0x114 + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NETLINK = 0xfd + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NORDIC_BLE = 0x110 + DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + 
DLT_PFSYNC = 0x79 + DLT_PKTAP = 0x102 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0xe + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PROFIBUS_DL = 0x101 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RDS = 0x109 + DLT_REDBACK_SMARTEDGE = 0x20 + DLT_RIO = 0x7c + DLT_RTAC_SERIAL = 0xfa + DLT_SCCP = 0x8e + DLT_SCTP = 0xf8 + DLT_SDLC = 0x10c + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xd + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TI_LLN_SNIFFER = 0x10d + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USBPCAP = 0xf9 + DLT_USB_DARWIN = 0x10a + DLT_USB_FREEBSD = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_VSOCK = 0x10f + DLT_WATTSTOPPER_DLM = 0x107 + DLT_WIHART = 0xdf + DLT_WIRESHARK_UPPER_PDU = 0xfc + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DLT_ZWAVE_R1_R2 = 0x105 + DLT_ZWAVE_R3 = 0x106 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EHE_DEAD_PRIORITY = -0x1 + EVFILT_AIO = -0x3 + EVFILT_EMPTY = -0xd + EVFILT_FS = -0x9 + EVFILT_LIO = -0xa + EVFILT_PROC = -0x5 + EVFILT_PROCDESC = -0x8 + EVFILT_READ = -0x1 + EVFILT_SENDFILE = -0xc + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xd + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xb + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EVNAMEMAP_NAME_SIZE = 0x40 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DROP = 0x1000 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_FLAG2 = 0x4000 + EV_FORCEONESHOT = 0x100 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTATTR_MAXNAMELEN = 0xff + EXTATTR_NAMESPACE_EMPTY = 0x0 + EXTATTR_NAMESPACE_SYSTEM = 0x2 + EXTATTR_NAMESPACE_USER = 0x1 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_NONE = -0xc8 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_ADD_SEALS = 0x13 + F_CANCEL = 0x5 + F_DUP2FD = 0xa + F_DUP2FD_CLOEXEC = 0x12 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x11 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0xb + F_GETOWN = 0x5 + F_GET_SEALS = 0x14 + F_ISUNIONSTACK = 0x15 + F_KINFO = 0x16 + F_OGETLK = 0x7 + F_OK = 0x0 + F_OSETLK = 0x8 + F_OSETLKW = 0x9 + F_RDAHEAD = 0x10 + F_RDLCK = 0x1 + F_READAHEAD = 0xf + F_SEAL_GROW = 0x4 + F_SEAL_SEAL = 0x1 + F_SEAL_SHRINK = 0x2 + F_SEAL_WRITE = 0x8 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0xc + F_SETLKW = 0xd + F_SETLK_REMOTE = 0xe + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_UNLCKSYS = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFCAP_WOL_MAGIC = 0x2000 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x218f72 + IFF_CANTCONFIG = 0x10000 + IFF_DEBUG = 0x4 + IFF_DRV_OACTIVE = 0x400 + IFF_DRV_RUNNING = 0x40 + IFF_DYING = 0x200000 + IFF_KNOWSEPOCH = 0x20 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 
0x4000 + IFF_LOOPBACK = 0x8 + IFF_MONITOR = 0x40000 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOGROUP = 0x800000 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PPROMISC = 0x20000 + IFF_PROMISC = 0x100 + IFF_RENAMING = 0x400000 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x80000 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_BRIDGE = 0xd1 + IFT_CARP = 0xf8 + IFT_IEEE1394 = 0x90 + IFT_INFINIBAND = 0xc7 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_PPP = 0x17 + IFT_PROPVIRTUAL = 0x35 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_NETMASK_DEFAULT = 0xffffff00 + IN_RFC3021_MASK = 0xfffffffe + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CARP = 0x70 + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HIP = 0x8b + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MEAS = 0x13 + IPPROTO_MH = 0x87 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OLD_DIVERT = 0xfe + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_RESERVED_253 = 0xfd + IPPROTO_RESERVED_254 = 0xfe + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEND = 0x103 + IPPROTO_SHIM6 = 0x8c + IPPROTO_SKIP = 0x39 + IPPROTO_SPACER = 0x7fff + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TLSP = 0x38 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + 
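The IPPROTO_* constants above are the IANA protocol numbers as exposed by this platform's headers; they are what socket(2) takes as its third argument. A minimal sketch, assuming only the golang.org/x/sys/unix API being vendored here (a raw ICMP socket normally needs elevated privileges, so expect EPERM otherwise):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// IPPROTO_ICMP is the protocol number passed as the third
	// argument to socket(2); raw sockets usually require root
	// (or CAP_NET_RAW on Linux).
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_RAW, unix.IPPROTO_ICMP)
	if err != nil {
		fmt.Println("socket:", err)
		return
	}
	defer unix.Close(fd)
	fmt.Println("raw ICMP socket opened, fd =", fd)
}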
IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDANY = 0x40 + IPV6_BINDMULTI = 0x41 + IPV6_BINDV6ONLY = 0x1b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FLOWID = 0x43 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_LEN = 0x14 + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FLOWTYPE = 0x44 + IPV6_FRAGTTL = 0x78 + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_ORIGDSTADDR = 0x48 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVFLOWID = 0x46 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVORIGDSTADDR = 0x48 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRSSBUCKETID = 0x47 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RSSBUCKETID = 0x45 + IPV6_RSS_LISTEN_BUCKET = 0x42 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BINDANY = 0x18 + IP_BINDMULTI = 0x19 + IP_BLOCK_SOURCE = 0x48 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DONTFRAG = 0x43 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET3 = 0x31 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FLOWID = 0x5a + IP_FLOWTYPE = 0x5b + IP_FW3 = 0x30 + IP_FW_ADD = 0x32 + IP_FW_DEL = 0x33 + IP_FW_FLUSH = 0x34 + IP_FW_GET = 0x36 + IP_FW_NAT_CFG = 0x38 + IP_FW_NAT_DEL = 0x39 + IP_FW_NAT_GET_CONFIG = 0x3a + IP_FW_NAT_GET_LOG = 0x3b + IP_FW_RESETLOG = 0x37 + IP_FW_TABLE_ADD = 0x28 + IP_FW_TABLE_DEL = 0x29 + IP_FW_TABLE_FLUSH = 0x2a + IP_FW_TABLE_GETSIZE = 0x2b + IP_FW_TABLE_LIST = 0x2c + IP_FW_ZERO = 0x35 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MF = 0x2000 + IP_MINTTL = 0x42 + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_OFFMASK = 0x1fff + IP_ONESBCAST = 0x17 + IP_OPTIONS = 0x1 + IP_ORIGDSTADDR = 0x1b + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVFLOWID = 0x5d + IP_RECVIF = 0x14 + 
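The IPV6_* and IP_* names in this stretch are setsockopt(2)/getsockopt(2) option values. As a hedged sketch of typical use via the vendored package, with level IPPROTO_IPV6 matching the IPV6_* option namespace:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET6, unix.SOCK_STREAM, 0)
	if err != nil {
		fmt.Println("socket:", err)
		return
	}
	defer unix.Close(fd)

	// IPV6_V6ONLY restricts the socket to IPv6 traffic only;
	// the option level for IPV6_* options is IPPROTO_IPV6.
	if err := unix.SetsockoptInt(fd, unix.IPPROTO_IPV6, unix.IPV6_V6ONLY, 1); err != nil {
		fmt.Println("setsockopt:", err)
		return
	}
	fmt.Println("IPV6_V6ONLY enabled")
}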
IP_RECVOPTS = 0x5 + IP_RECVORIGDSTADDR = 0x1b + IP_RECVRETOPTS = 0x6 + IP_RECVRSSBUCKETID = 0x5e + IP_RECVTOS = 0x44 + IP_RECVTTL = 0x41 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSSBUCKETID = 0x5c + IP_RSS_LISTEN_BUCKET = 0x1a + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b + ISIG = 0x80 + ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LOCAL_CONNWAIT = 0x4 + LOCAL_CREDS = 0x2 + LOCAL_CREDS_PERSISTENT = 0x3 + LOCAL_PEERCRED = 0x1 + LOCAL_VENDOR = 0x80000000 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_AUTOSYNC = 0x7 + MADV_CORE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_NOCORE = 0x8 + MADV_NORMAL = 0x0 + MADV_NOSYNC = 0x6 + MADV_PROTECT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MAP_32BIT = 0x80000 + MAP_ALIGNED_SUPER = 0x1000000 + MAP_ALIGNMENT_MASK = -0x1000000 + MAP_ALIGNMENT_SHIFT = 0x18 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_EXCL = 0x4000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GUARD = 0x2000 + MAP_HASSEMAPHORE = 0x200 + MAP_NOCORE = 0x20000 + MAP_NOSYNC = 0x800 + MAP_PREFAULT_READ = 0x40000 + MAP_PRIVATE = 0x2 + MAP_RESERVED0020 = 0x20 + MAP_RESERVED0040 = 0x40 + MAP_RESERVED0080 = 0x80 + MAP_RESERVED0100 = 0x100 + MAP_SHARED = 0x1 + MAP_STACK = 0x400 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MFD_ALLOW_SEALING = 0x2 + MFD_CLOEXEC = 0x1 + MFD_HUGETLB = 0x4 + MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16MB = 0x60000000 + MFD_HUGE_1GB = 0x78000000 + MFD_HUGE_1MB = 0x50000000 + MFD_HUGE_256MB = 0x70000000 + MFD_HUGE_2GB = 0x7c000000 + MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_32MB = 0x64000000 + MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_512MB = 0x74000000 + MFD_HUGE_64KB = 0x40000000 + MFD_HUGE_8MB = 0x5c000000 + MFD_HUGE_MASK = 0xfc000000 + MFD_HUGE_SHIFT = 0x1a + MNT_ACLS = 0x8000000 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x200000000 + MNT_BYFSID = 0x8000000 + MNT_CMDFLAGS = 0x300d0f0000 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_EMPTYDIR = 0x2000000000 + MNT_EXKERB = 0x800 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXPUBLIC = 0x20000000 + MNT_EXRDONLY = 0x80 + MNT_EXTLS = 0x4000000000 + MNT_EXTLSCERT = 0x8000000000 + MNT_EXTLSCERTUSER = 0x10000000000 + MNT_FORCE = 0x80000 + MNT_GJOURNAL = 0x2000000 + MNT_IGNORE = 0x800000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NFS4ACLS = 0x10 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + MNT_NOCLUSTERW = 0x80000000 + MNT_NOCOVER = 0x1000000000 + MNT_NOEXEC = 0x4 + MNT_NONBUSY = 0x4000000 + MNT_NOSUID = 0x8 + MNT_NOSYMFOLLOW = 0x400000 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x1000000 + MNT_SOFTDEP = 0x200000 + MNT_SUIDDIR = 0x100000 + MNT_SUJ = 0x100000000 + MNT_SUSPEND = 0x4 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UNTRUSTED = 0x800000000 + MNT_UPDATE = 0x10000 + MNT_UPDATEMASK = 0xad8d0807e + MNT_USER = 0x8000 + MNT_VERIFIED = 0x400000000 + MNT_VISFLAGMASK = 
0xffef0ffff + MNT_WAIT = 0x1 + MSG_CMSG_CLOEXEC = 0x40000 + MSG_COMPAT = 0x8000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_NBIO = 0x4000 + MSG_NOSIGNAL = 0x20000 + MSG_NOTIFICATION = 0x2000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x80000 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x0 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFLISTL = 0x5 + NET_RT_IFMALIST = 0x4 + NET_RT_NHGRP = 0x7 + NET_RT_NHOP = 0x6 + NFDBITS = 0x40 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ABSTIME = 0x10 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_CLOSE = 0x100 + NOTE_CLOSE_WRITE = 0x200 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FILE_POLL = 0x2 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_MSECONDS = 0x2 + NOTE_NSECONDS = 0x8 + NOTE_OPEN = 0x80 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_READ = 0x400 + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_SECONDS = 0x1 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_USECONDS = 0x4 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x100000 + O_CREAT = 0x200 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x20000 + O_DSYNC = 0x1000000 + O_EMPTY_PATH = 0x2000000 + O_EXCL = 0x800 + O_EXEC = 0x40000 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_PATH = 0x400000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_TTY_INIT = 0x80000 + O_VERIFY = 0x200000 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_COREDUMP = 0x1d + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 
0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FIXEDMTU = 0x80000 + RTF_FMASK = 0x1004d808 + RTF_GATEWAY = 0x2 + RTF_GWFLAG_COMPAT = 0x80000000 + RTF_HOST = 0x4 + RTF_LLDATA = 0x400 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_PINNED = 0x100000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_STATIC = 0x800 + RTF_STICKY = 0x10000000 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x12 + RTM_IFANNOUNCE = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RTV_WEIGHT = 0x100 + RT_ALL_FIBS = -0x1 + RT_BLACKHOLE = 0x40 + RT_DEFAULT_FIB = 0x0 + RT_DEFAULT_WEIGHT = 0x1 + RT_HAS_GW = 0x80 + RT_HAS_HEADER = 0x10 + RT_HAS_HEADER_BIT = 0x4 + RT_L2_ME = 0x4 + RT_L2_ME_BIT = 0x2 + RT_LLE_CACHE = 0x100 + RT_MAX_WEIGHT = 0xffffff + RT_MAY_LOOP = 0x8 + RT_MAY_LOOP_BIT = 0x3 + RT_REJECT = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_BINTIME = 0x4 + SCM_CREDS = 0x3 + SCM_CREDS2 = 0x8 + SCM_MONOTONIC = 0x6 + SCM_REALTIME = 0x5 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SCM_TIME_INFO = 0x7 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80286987 + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 + SIOCDIFPHYADDR = 0x80206949 + SIOCGDRVSPEC = 0xc028697b + SIOCGETSGCNT = 0xc0207210 + SIOCGETVIFCNT = 0xc028720f + SIOCGHIWAT = 0x40047301 + SIOCGHWADDR = 0xc020693e + SIOCGI2C = 0xc020693d + SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020691f + SIOCGIFCONF = 0xc0106924 + SIOCGIFDATA = 0x8020692c + SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFIB = 0xc020695c + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 + SIOCGIFINDEX = 0xc0206920 + SIOCGIFMAC = 0xc0206926 + SIOCGIFMEDIA = 0xc0306938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRSSHASH = 0xc0186997 + SIOCGIFRSSKEY = 0xc0946996 + SIOCGIFSTATUS = 0xc331693b + SIOCGIFXMEDIA = 0xc030698b + SIOCGLANPCP = 0xc0206998 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGPRIVATE_0 = 0xc0206950 + SIOCGPRIVATE_1 = 0xc0206951 + SIOCGTUNFIB = 0xc020695e + SIOCIFCREATE = 0xc020697a + SIOCIFCREATE2 = 0xc020697c + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSDRVSPEC = 0x8028697b + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 
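SCM_RIGHTS in the block above is the control-message type for passing file descriptors across Unix sockets; x/sys/unix already wraps the ancillary-data encoding. A minimal sketch under that assumption, sending stdout's descriptor over a socketpair:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	sp, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
	if err != nil {
		fmt.Println("socketpair:", err)
		return
	}

	// Encode stdout's fd as an SCM_RIGHTS control message and send it
	// alongside one byte of ordinary data.
	oob := unix.UnixRights(int(os.Stdout.Fd()))
	if err := unix.Sendmsg(sp[0], []byte("x"), oob, nil, 0); err != nil {
		fmt.Println("sendmsg:", err)
		return
	}

	buf := make([]byte, 1)
	oobBuf := make([]byte, unix.CmsgSpace(4)) // room for one 32-bit fd
	_, oobn, _, _, err := unix.Recvmsg(sp[1], buf, oobBuf, 0)
	if err != nil {
		fmt.Println("recvmsg:", err)
		return
	}
	msgs, err := unix.ParseSocketControlMessage(oobBuf[:oobn])
	if err != nil || len(msgs) == 0 {
		fmt.Println("parse control message:", err)
		return
	}
	fds, _ := unix.ParseUnixRights(&msgs[0])
	fmt.Println("received duplicated fd:", fds)
}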
+ SIOCSIFCAP = 0x8020691e + SIOCSIFDESCR = 0x80206929 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFIB = 0x8020695d + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206927 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNAME = 0x80206928 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPHYS = 0x80206936 + SIOCSIFRVNET = 0xc020695b + SIOCSIFVNET = 0xc020695a + SIOCSLANPCP = 0x80206999 + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SIOCSTUNFIB = 0x8020695f + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_NONBLOCK = 0x20000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BINTIME = 0x2000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DOMAIN = 0x1019 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1009 + SO_LINGER = 0x80 + SO_LISTENINCQLEN = 0x1013 + SO_LISTENQLEN = 0x1012 + SO_LISTENQLIMIT = 0x1011 + SO_MAX_PACING_RATE = 0x1018 + SO_NOSIGPIPE = 0x800 + SO_NO_DDP = 0x8000 + SO_NO_OFFLOAD = 0x4000 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1010 + SO_PROTOCOL = 0x1016 + SO_PROTOTYPE = 0x1016 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_REUSEPORT_LB = 0x10000 + SO_SETFIB = 0x1014 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TS_BINTIME = 0x1 + SO_TS_CLOCK = 0x1017 + SO_TS_CLOCK_MAX = 0x3 + SO_TS_DEFAULT = 0x0 + SO_TS_MONOTONIC = 0x3 + SO_TS_REALTIME = 0x2 + SO_TS_REALTIME_MICRO = 0x0 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_USER_COOKIE = 0x1015 + SO_VENDOR = 0x80000000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB3 = 0x4 + TABDLY = 0x4 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 + TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b + TCP_BBR_DRAIN_INC_EXTRA = 0x43c + TCP_BBR_DRAIN_PG = 0x42e + TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 + TCP_BBR_IWINTSO = 0x42b + TCP_BBR_LOWGAIN_FD = 0x436 + TCP_BBR_LOWGAIN_HALF = 0x435 + TCP_BBR_LOWGAIN_THRESH = 0x434 + TCP_BBR_MAX_RTO = 0x439 + TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 + TCP_BBR_ONE_RETRAN = 0x431 + TCP_BBR_PACE_CROSS = 0x442 + TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 + TCP_BBR_PACE_PER_SEC = 0x43e + TCP_BBR_PACE_SEG_MAX = 0x440 + TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 + TCP_BBR_PROBE_RTT_GAIN = 0x44d + TCP_BBR_PROBE_RTT_INT = 0x430 + TCP_BBR_PROBE_RTT_LEN 
= 0x44e + TCP_BBR_RACK_INIT_RATE = 0x458 + TCP_BBR_RACK_RTT_USE = 0x44a + TCP_BBR_RECFORCE = 0x42c + TCP_BBR_REC_OVER_HPTS = 0x43a + TCP_BBR_RETRAN_WTSO = 0x44b + TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f + TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d + TCP_BBR_STARTUP_LOSS_EXIT = 0x432 + TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 + TCP_BBR_UNLIMITED = 0x43b + TCP_BBR_USEDEL_RATE = 0x437 + TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_USE_RACK_RR = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 + TCP_CA_NAME_MAX = 0x10 + TCP_CCALGOOPT = 0x41 + TCP_CONGESTION = 0x40 + TCP_DATA_AFTER_CLOSE = 0x44c + TCP_DEFER_OPTIONS = 0x470 + TCP_DELACK = 0x48 + TCP_FASTOPEN = 0x401 + TCP_FASTOPEN_MAX_COOKIE_LEN = 0x10 + TCP_FASTOPEN_MIN_COOKIE_LEN = 0x4 + TCP_FASTOPEN_PSK_LEN = 0x10 + TCP_FAST_RSM_HACK = 0x471 + TCP_FIN_IS_RST = 0x49 + TCP_FUNCTION_BLK = 0x2000 + TCP_FUNCTION_NAME_LEN_MAX = 0x20 + TCP_HDWR_RATE_CAP = 0x46a + TCP_HDWR_UP_ONLY = 0x46c + TCP_IDLE_REDUCE = 0x46 + TCP_INFO = 0x20 + TCP_IWND_NB = 0x2b + TCP_IWND_NSEG = 0x2c + TCP_KEEPCNT = 0x400 + TCP_KEEPIDLE = 0x100 + TCP_KEEPINIT = 0x80 + TCP_KEEPINTVL = 0x200 + TCP_LOG = 0x22 + TCP_LOGBUF = 0x23 + TCP_LOGDUMP = 0x25 + TCP_LOGDUMPID = 0x26 + TCP_LOGID = 0x24 + TCP_LOGID_CNT = 0x2e + TCP_LOG_ID_LEN = 0x40 + TCP_LOG_LIMIT = 0x4a + TCP_LOG_TAG = 0x2f + TCP_MAXBURST = 0x4 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXPEAKRATE = 0x45 + TCP_MAXSEG = 0x2 + TCP_MAXUNACKTIME = 0x44 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x10 + TCP_MINMSS = 0xd8 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_NO_PRR = 0x462 + TCP_PACING_RATE_CAP = 0x46b + TCP_PCAP_IN = 0x1000 + TCP_PCAP_OUT = 0x800 + TCP_PERF_INFO = 0x4e + TCP_PROC_ACCOUNTING = 0x4c + TCP_RACK_ABC_VAL = 0x46d + TCP_RACK_CHEAT_NOT_CONF_RATE = 0x459 + TCP_RACK_DO_DETECTION = 0x449 + TCP_RACK_EARLY_RECOV = 0x423 + TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_FORCE_MSEG = 0x45d + TCP_RACK_GP_INCREASE = 0x446 + TCP_RACK_GP_INCREASE_CA = 0x45a + TCP_RACK_GP_INCREASE_REC = 0x45c + TCP_RACK_GP_INCREASE_SS = 0x45b + TCP_RACK_IDLE_REDUCE_HIGH = 0x444 + TCP_RACK_MBUF_QUEUE = 0x41a + TCP_RACK_MEASURE_CNT = 0x46f + TCP_RACK_MIN_PACE = 0x445 + TCP_RACK_MIN_PACE_SEG = 0x446 + TCP_RACK_MIN_TO = 0x422 + TCP_RACK_NONRXT_CFG_RATE = 0x463 + TCP_RACK_NO_PUSH_AT_MAX = 0x466 + TCP_RACK_PACE_ALWAYS = 0x41f + TCP_RACK_PACE_MAX_SEG = 0x41e + TCP_RACK_PACE_RATE_CA = 0x45e + TCP_RACK_PACE_RATE_REC = 0x460 + TCP_RACK_PACE_RATE_SS = 0x45f + TCP_RACK_PACE_REDUCE = 0x41d + TCP_RACK_PACE_TO_FILL = 0x467 + TCP_RACK_PACING_BETA = 0x472 + TCP_RACK_PACING_BETA_ECN = 0x473 + TCP_RACK_PKT_DELAY = 0x428 + TCP_RACK_PROFILE = 0x469 + TCP_RACK_PROP = 0x41b + TCP_RACK_PROP_RATE = 0x420 + TCP_RACK_PRR_SENDALOT = 0x421 + TCP_RACK_REORD_FADE = 0x426 + TCP_RACK_REORD_THRESH = 0x425 + TCP_RACK_RR_CONF = 0x459 + TCP_RACK_TIMER_SLOP = 0x474 + TCP_RACK_TLP_INC_VAR = 0x429 + TCP_RACK_TLP_REDUCE = 0x41c + TCP_RACK_TLP_THRESH = 0x427 + TCP_RACK_TLP_USE = 0x447 + TCP_REC_ABC_VAL = 0x46e + TCP_REMOTE_UDP_ENCAPS_PORT = 0x47 + TCP_REUSPORT_LB_NUMA = 0x402 + TCP_REUSPORT_LB_NUMA_CURDOM = -0x1 + TCP_REUSPORT_LB_NUMA_NODOM = -0x2 + TCP_RXTLS_ENABLE = 0x29 + TCP_RXTLS_MODE = 0x2a + TCP_SHARED_CWND_ALLOWED = 0x4b + TCP_SHARED_CWND_ENABLE = 0x464 + TCP_SHARED_CWND_TIME_LIMIT = 0x468 + TCP_STATS = 0x21 + TCP_TIMELY_DYN_ADJ = 0x465 + TCP_TLS_MODE_IFNET = 0x2 + TCP_TLS_MODE_NONE = 0x0 + 
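Most of the TCP_BBR_* and TCP_RACK_* knobs above are FreeBSD stack-specific, but the basic TCP_* options are the familiar portable ones. A minimal sketch of setting two of them through the vendored package (TCP_KEEPIDLE only takes effect once SO_KEEPALIVE is enabled on the socket):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
	if err != nil {
		fmt.Println("socket:", err)
		return
	}
	defer unix.Close(fd)

	// Disable Nagle's algorithm; level for TCP_* options is IPPROTO_TCP.
	if err := unix.SetsockoptInt(fd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1); err != nil {
		fmt.Println("TCP_NODELAY:", err)
	}
	// Start keepalive probing after 60 seconds of idle time.
	if err := unix.SetsockoptInt(fd, unix.IPPROTO_TCP, unix.TCP_KEEPIDLE, 60); err != nil {
		fmt.Println("TCP_KEEPIDLE:", err)
	}
}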
TCP_TLS_MODE_SW = 0x1 + TCP_TLS_MODE_TOE = 0x3 + TCP_TXTLS_ENABLE = 0x27 + TCP_TXTLS_MODE = 0x28 + TCP_USER_LOG = 0x30 + TCP_USE_CMP_ACKS = 0x4d + TCP_VENDOR = 0x80000000 + TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGPTN = 0x4004740f + TIOCGSID = 0x40047463 + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DCD = 0x40 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTMASTER = 0x2000741c + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40107459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + UTIME_NOW = -0x1 + UTIME_OMIT = -0x2 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VERASE2 = 0x7 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x4 + WCOREFLAG = 0x80 + WEXITED = 0x10 + WLINUXCLONE = 0x80000000 + WNOHANG = 0x1 + WNOWAIT = 0x8 + WSTOPPED = 0x2 + WTRAPPED = 0x20 + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x59) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x55) + ECAPMODE = syscall.Errno(0x5e) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDOOFUS = syscall.Errno(0x58) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x56) + EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x61) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = 
syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5a) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x57) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCAPABLE = syscall.Errno(0x5d) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5f) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EOWNERDEAD = syscall.Errno(0x60) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5c) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGLIBRT = syscall.Signal(0x21) + SIGLWP = syscall.Signal(0x20) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad 
file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. 
not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIDRM", "identifier removed"}, + {83, "ENOMSG", "no message of desired type"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "ECANCELED", "operation canceled"}, + {86, "EILSEQ", "illegal byte sequence"}, + {87, "ENOATTR", "attribute not found"}, + {88, "EDOOFUS", "programming error"}, + {89, "EBADMSG", "bad message"}, + {90, "EMULTIHOP", "multihop attempted"}, + {91, "ENOLINK", "link has been severed"}, + {92, "EPROTO", "protocol error"}, + {93, "ENOTCAPABLE", "capabilities insufficient"}, + {94, "ECAPMODE", "not permitted in capability mode"}, + {95, "ENOTRECOVERABLE", "state not recoverable"}, + {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "unknown signal"}, + {33, "SIGLIBRT", "unknown signal"}, +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 36a89c643..785d693eb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -140,6 +140,306 @@ const ( ARPHRD_VOID = 0xffff ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f + AUDIT_ADD = 0x3eb + AUDIT_ADD_RULE = 0x3f3 + AUDIT_ALWAYS = 0x2 + AUDIT_ANOM_ABEND = 0x6a5 + AUDIT_ANOM_CREAT = 0x6a7 + AUDIT_ANOM_LINK = 0x6a6 + AUDIT_ANOM_PROMISCUOUS = 0x6a4 + AUDIT_ARCH = 0xb + AUDIT_ARCH_AARCH64 = 0xc00000b7 + AUDIT_ARCH_ALPHA = 0xc0009026 + AUDIT_ARCH_ARCOMPACT = 0x4000005d + AUDIT_ARCH_ARCOMPACTBE = 0x5d + AUDIT_ARCH_ARCV2 = 0x400000c3 + AUDIT_ARCH_ARCV2BE = 0xc3 + AUDIT_ARCH_ARM = 0x40000028 + AUDIT_ARCH_ARMEB = 0x28 + AUDIT_ARCH_C6X = 0x4000008c + AUDIT_ARCH_C6XBE = 0x8c + AUDIT_ARCH_CRIS = 0x4000004c + AUDIT_ARCH_CSKY = 0x400000fc + AUDIT_ARCH_FRV = 0x5441 + AUDIT_ARCH_H8300 = 0x2e + AUDIT_ARCH_HEXAGON = 0xa4 + AUDIT_ARCH_I386 = 0x40000003 + AUDIT_ARCH_IA64 = 0xc0000032 + AUDIT_ARCH_LOONGARCH32 = 0x40000102 + AUDIT_ARCH_LOONGARCH64 = 0xc0000102 + AUDIT_ARCH_M32R 
= 0x58 + AUDIT_ARCH_M68K = 0x4 + AUDIT_ARCH_MICROBLAZE = 0xbd + AUDIT_ARCH_MIPS = 0x8 + AUDIT_ARCH_MIPS64 = 0x80000008 + AUDIT_ARCH_MIPS64N32 = 0xa0000008 + AUDIT_ARCH_MIPSEL = 0x40000008 + AUDIT_ARCH_MIPSEL64 = 0xc0000008 + AUDIT_ARCH_MIPSEL64N32 = 0xe0000008 + AUDIT_ARCH_NDS32 = 0x400000a7 + AUDIT_ARCH_NDS32BE = 0xa7 + AUDIT_ARCH_NIOS2 = 0x40000071 + AUDIT_ARCH_OPENRISC = 0x5c + AUDIT_ARCH_PARISC = 0xf + AUDIT_ARCH_PARISC64 = 0x8000000f + AUDIT_ARCH_PPC = 0x14 + AUDIT_ARCH_PPC64 = 0x80000015 + AUDIT_ARCH_PPC64LE = 0xc0000015 + AUDIT_ARCH_RISCV32 = 0x400000f3 + AUDIT_ARCH_RISCV64 = 0xc00000f3 + AUDIT_ARCH_S390 = 0x16 + AUDIT_ARCH_S390X = 0x80000016 + AUDIT_ARCH_SH = 0x2a + AUDIT_ARCH_SH64 = 0x8000002a + AUDIT_ARCH_SHEL = 0x4000002a + AUDIT_ARCH_SHEL64 = 0xc000002a + AUDIT_ARCH_SPARC = 0x2 + AUDIT_ARCH_SPARC64 = 0x8000002b + AUDIT_ARCH_TILEGX = 0xc00000bf + AUDIT_ARCH_TILEGX32 = 0x400000bf + AUDIT_ARCH_TILEPRO = 0x400000bc + AUDIT_ARCH_UNICORE = 0x4000006e + AUDIT_ARCH_X86_64 = 0xc000003e + AUDIT_ARCH_XTENSA = 0x5e + AUDIT_ARG0 = 0xc8 + AUDIT_ARG1 = 0xc9 + AUDIT_ARG2 = 0xca + AUDIT_ARG3 = 0xcb + AUDIT_AVC = 0x578 + AUDIT_AVC_PATH = 0x57a + AUDIT_BITMASK_SIZE = 0x40 + AUDIT_BIT_MASK = 0x8000000 + AUDIT_BIT_TEST = 0x48000000 + AUDIT_BPF = 0x536 + AUDIT_BPRM_FCAPS = 0x529 + AUDIT_CAPSET = 0x52a + AUDIT_CLASS_CHATTR = 0x2 + AUDIT_CLASS_CHATTR_32 = 0x3 + AUDIT_CLASS_DIR_WRITE = 0x0 + AUDIT_CLASS_DIR_WRITE_32 = 0x1 + AUDIT_CLASS_READ = 0x4 + AUDIT_CLASS_READ_32 = 0x5 + AUDIT_CLASS_SIGNAL = 0x8 + AUDIT_CLASS_SIGNAL_32 = 0x9 + AUDIT_CLASS_WRITE = 0x6 + AUDIT_CLASS_WRITE_32 = 0x7 + AUDIT_COMPARE_AUID_TO_EUID = 0x10 + AUDIT_COMPARE_AUID_TO_FSUID = 0xe + AUDIT_COMPARE_AUID_TO_OBJ_UID = 0x5 + AUDIT_COMPARE_AUID_TO_SUID = 0xf + AUDIT_COMPARE_EGID_TO_FSGID = 0x17 + AUDIT_COMPARE_EGID_TO_OBJ_GID = 0x4 + AUDIT_COMPARE_EGID_TO_SGID = 0x18 + AUDIT_COMPARE_EUID_TO_FSUID = 0x12 + AUDIT_COMPARE_EUID_TO_OBJ_UID = 0x3 + AUDIT_COMPARE_EUID_TO_SUID = 0x11 + AUDIT_COMPARE_FSGID_TO_OBJ_GID = 0x9 + AUDIT_COMPARE_FSUID_TO_OBJ_UID = 0x8 + AUDIT_COMPARE_GID_TO_EGID = 0x14 + AUDIT_COMPARE_GID_TO_FSGID = 0x15 + AUDIT_COMPARE_GID_TO_OBJ_GID = 0x2 + AUDIT_COMPARE_GID_TO_SGID = 0x16 + AUDIT_COMPARE_SGID_TO_FSGID = 0x19 + AUDIT_COMPARE_SGID_TO_OBJ_GID = 0x7 + AUDIT_COMPARE_SUID_TO_FSUID = 0x13 + AUDIT_COMPARE_SUID_TO_OBJ_UID = 0x6 + AUDIT_COMPARE_UID_TO_AUID = 0xa + AUDIT_COMPARE_UID_TO_EUID = 0xb + AUDIT_COMPARE_UID_TO_FSUID = 0xc + AUDIT_COMPARE_UID_TO_OBJ_UID = 0x1 + AUDIT_COMPARE_UID_TO_SUID = 0xd + AUDIT_CONFIG_CHANGE = 0x519 + AUDIT_CWD = 0x51b + AUDIT_DAEMON_ABORT = 0x4b2 + AUDIT_DAEMON_CONFIG = 0x4b3 + AUDIT_DAEMON_END = 0x4b1 + AUDIT_DAEMON_START = 0x4b0 + AUDIT_DEL = 0x3ec + AUDIT_DEL_RULE = 0x3f4 + AUDIT_DEVMAJOR = 0x64 + AUDIT_DEVMINOR = 0x65 + AUDIT_DIR = 0x6b + AUDIT_DM_CTRL = 0x53a + AUDIT_DM_EVENT = 0x53b + AUDIT_EGID = 0x6 + AUDIT_EOE = 0x528 + AUDIT_EQUAL = 0x40000000 + AUDIT_EUID = 0x2 + AUDIT_EVENT_LISTENER = 0x537 + AUDIT_EXE = 0x70 + AUDIT_EXECVE = 0x51d + AUDIT_EXIT = 0x67 + AUDIT_FAIL_PANIC = 0x2 + AUDIT_FAIL_PRINTK = 0x1 + AUDIT_FAIL_SILENT = 0x0 + AUDIT_FANOTIFY = 0x533 + AUDIT_FD_PAIR = 0x525 + AUDIT_FEATURE_BITMAP_ALL = 0x7f + AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT = 0x1 + AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME = 0x2 + AUDIT_FEATURE_BITMAP_EXCLUDE_EXTEND = 0x8 + AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH = 0x4 + AUDIT_FEATURE_BITMAP_FILTER_FS = 0x40 + AUDIT_FEATURE_BITMAP_LOST_RESET = 0x20 + AUDIT_FEATURE_BITMAP_SESSIONID_FILTER = 0x10 + AUDIT_FEATURE_CHANGE = 0x530 + AUDIT_FEATURE_LOGINUID_IMMUTABLE = 0x1 + 
AUDIT_FEATURE_ONLY_UNSET_LOGINUID = 0x0 + AUDIT_FEATURE_VERSION = 0x1 + AUDIT_FIELD_COMPARE = 0x6f + AUDIT_FILETYPE = 0x6c + AUDIT_FILTERKEY = 0xd2 + AUDIT_FILTER_ENTRY = 0x2 + AUDIT_FILTER_EXCLUDE = 0x5 + AUDIT_FILTER_EXIT = 0x4 + AUDIT_FILTER_FS = 0x6 + AUDIT_FILTER_PREPEND = 0x10 + AUDIT_FILTER_TASK = 0x1 + AUDIT_FILTER_TYPE = 0x5 + AUDIT_FILTER_URING_EXIT = 0x7 + AUDIT_FILTER_USER = 0x0 + AUDIT_FILTER_WATCH = 0x3 + AUDIT_FIRST_KERN_ANOM_MSG = 0x6a4 + AUDIT_FIRST_USER_MSG = 0x44c + AUDIT_FIRST_USER_MSG2 = 0x834 + AUDIT_FSGID = 0x8 + AUDIT_FSTYPE = 0x1a + AUDIT_FSUID = 0x4 + AUDIT_GET = 0x3e8 + AUDIT_GET_FEATURE = 0x3fb + AUDIT_GID = 0x5 + AUDIT_GREATER_THAN = 0x20000000 + AUDIT_GREATER_THAN_OR_EQUAL = 0x60000000 + AUDIT_INODE = 0x66 + AUDIT_INTEGRITY_DATA = 0x708 + AUDIT_INTEGRITY_EVM_XATTR = 0x70e + AUDIT_INTEGRITY_HASH = 0x70b + AUDIT_INTEGRITY_METADATA = 0x709 + AUDIT_INTEGRITY_PCR = 0x70c + AUDIT_INTEGRITY_POLICY_RULE = 0x70f + AUDIT_INTEGRITY_RULE = 0x70d + AUDIT_INTEGRITY_STATUS = 0x70a + AUDIT_IPC = 0x517 + AUDIT_IPC_SET_PERM = 0x51f + AUDIT_KERNEL = 0x7d0 + AUDIT_KERNEL_OTHER = 0x524 + AUDIT_KERN_MODULE = 0x532 + AUDIT_LAST_FEATURE = 0x1 + AUDIT_LAST_KERN_ANOM_MSG = 0x707 + AUDIT_LAST_USER_MSG = 0x4af + AUDIT_LAST_USER_MSG2 = 0xbb7 + AUDIT_LESS_THAN = 0x10000000 + AUDIT_LESS_THAN_OR_EQUAL = 0x50000000 + AUDIT_LIST = 0x3ea + AUDIT_LIST_RULES = 0x3f5 + AUDIT_LOGIN = 0x3ee + AUDIT_LOGINUID = 0x9 + AUDIT_LOGINUID_SET = 0x18 + AUDIT_MAC_CALIPSO_ADD = 0x58a + AUDIT_MAC_CALIPSO_DEL = 0x58b + AUDIT_MAC_CIPSOV4_ADD = 0x57f + AUDIT_MAC_CIPSOV4_DEL = 0x580 + AUDIT_MAC_CONFIG_CHANGE = 0x57d + AUDIT_MAC_IPSEC_ADDSA = 0x583 + AUDIT_MAC_IPSEC_ADDSPD = 0x585 + AUDIT_MAC_IPSEC_DELSA = 0x584 + AUDIT_MAC_IPSEC_DELSPD = 0x586 + AUDIT_MAC_IPSEC_EVENT = 0x587 + AUDIT_MAC_MAP_ADD = 0x581 + AUDIT_MAC_MAP_DEL = 0x582 + AUDIT_MAC_POLICY_LOAD = 0x57b + AUDIT_MAC_STATUS = 0x57c + AUDIT_MAC_UNLBL_ALLOW = 0x57e + AUDIT_MAC_UNLBL_STCADD = 0x588 + AUDIT_MAC_UNLBL_STCDEL = 0x589 + AUDIT_MAKE_EQUIV = 0x3f7 + AUDIT_MAX_FIELDS = 0x40 + AUDIT_MAX_FIELD_COMPARE = 0x19 + AUDIT_MAX_KEY_LEN = 0x100 + AUDIT_MESSAGE_TEXT_MAX = 0x2170 + AUDIT_MMAP = 0x52b + AUDIT_MQ_GETSETATTR = 0x523 + AUDIT_MQ_NOTIFY = 0x522 + AUDIT_MQ_OPEN = 0x520 + AUDIT_MQ_SENDRECV = 0x521 + AUDIT_MSGTYPE = 0xc + AUDIT_NEGATE = 0x80000000 + AUDIT_NETFILTER_CFG = 0x52d + AUDIT_NETFILTER_PKT = 0x52c + AUDIT_NEVER = 0x0 + AUDIT_NLGRP_MAX = 0x1 + AUDIT_NOT_EQUAL = 0x30000000 + AUDIT_NR_FILTERS = 0x8 + AUDIT_OBJ_GID = 0x6e + AUDIT_OBJ_LEV_HIGH = 0x17 + AUDIT_OBJ_LEV_LOW = 0x16 + AUDIT_OBJ_PID = 0x526 + AUDIT_OBJ_ROLE = 0x14 + AUDIT_OBJ_TYPE = 0x15 + AUDIT_OBJ_UID = 0x6d + AUDIT_OBJ_USER = 0x13 + AUDIT_OPENAT2 = 0x539 + AUDIT_OPERATORS = 0x78000000 + AUDIT_PATH = 0x516 + AUDIT_PERM = 0x6a + AUDIT_PERM_ATTR = 0x8 + AUDIT_PERM_EXEC = 0x1 + AUDIT_PERM_READ = 0x4 + AUDIT_PERM_WRITE = 0x2 + AUDIT_PERS = 0xa + AUDIT_PID = 0x0 + AUDIT_POSSIBLE = 0x1 + AUDIT_PPID = 0x12 + AUDIT_PROCTITLE = 0x52f + AUDIT_REPLACE = 0x531 + AUDIT_SADDR_FAM = 0x71 + AUDIT_SECCOMP = 0x52e + AUDIT_SELINUX_ERR = 0x579 + AUDIT_SESSIONID = 0x19 + AUDIT_SET = 0x3e9 + AUDIT_SET_FEATURE = 0x3fa + AUDIT_SGID = 0x7 + AUDIT_SID_UNSET = 0xffffffff + AUDIT_SIGNAL_INFO = 0x3f2 + AUDIT_SOCKADDR = 0x51a + AUDIT_SOCKETCALL = 0x518 + AUDIT_STATUS_BACKLOG_LIMIT = 0x10 + AUDIT_STATUS_BACKLOG_WAIT_TIME = 0x20 + AUDIT_STATUS_BACKLOG_WAIT_TIME_ACTUAL = 0x80 + AUDIT_STATUS_ENABLED = 0x1 + AUDIT_STATUS_FAILURE = 0x2 + AUDIT_STATUS_LOST = 0x40 + AUDIT_STATUS_PID = 0x4 + AUDIT_STATUS_RATE_LIMIT = 0x8 + 
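The AUDIT_ARCH_* tokens added above are the values the kernel reports in seccomp_data.arch, which a seccomp-BPF filter must check before trusting syscall numbers. A hedged sketch: auditArch below is a hypothetical helper (not part of this diff) mapping the build's GOARCH to its token, using only constants introduced in this hunk:

package main

import (
	"fmt"
	"runtime"

	"golang.org/x/sys/unix"
)

// auditArch returns the AUDIT_ARCH_* value a seccomp-BPF filter should
// accept for the current build target; unmapped GOARCH values error out.
func auditArch() (uint32, error) {
	switch runtime.GOARCH {
	case "386":
		return unix.AUDIT_ARCH_I386, nil
	case "amd64":
		return unix.AUDIT_ARCH_X86_64, nil
	case "arm64":
		return unix.AUDIT_ARCH_AARCH64, nil
	case "loong64":
		return unix.AUDIT_ARCH_LOONGARCH64, nil
	}
	return 0, fmt.Errorf("no AUDIT_ARCH_* mapping for GOARCH %q", runtime.GOARCH)
}

func main() {
	arch, err := auditArch()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("seccomp filter should require arch == %#x\n", arch)
}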
AUDIT_SUBJ_CLR = 0x11 + AUDIT_SUBJ_ROLE = 0xe + AUDIT_SUBJ_SEN = 0x10 + AUDIT_SUBJ_TYPE = 0xf + AUDIT_SUBJ_USER = 0xd + AUDIT_SUCCESS = 0x68 + AUDIT_SUID = 0x3 + AUDIT_SYSCALL = 0x514 + AUDIT_SYSCALL_CLASSES = 0x10 + AUDIT_TIME_ADJNTPVAL = 0x535 + AUDIT_TIME_INJOFFSET = 0x534 + AUDIT_TRIM = 0x3f6 + AUDIT_TTY = 0x527 + AUDIT_TTY_GET = 0x3f8 + AUDIT_TTY_SET = 0x3f9 + AUDIT_UID = 0x1 + AUDIT_UID_UNSET = 0xffffffff + AUDIT_UNUSED_BITS = 0x7fffc00 + AUDIT_URINGOP = 0x538 + AUDIT_USER = 0x3ed + AUDIT_USER_AVC = 0x453 + AUDIT_USER_TTY = 0x464 + AUDIT_VERSION_BACKLOG_LIMIT = 0x1 + AUDIT_VERSION_BACKLOG_WAIT_TIME = 0x2 + AUDIT_VERSION_LATEST = 0x7f + AUDIT_WATCH = 0x69 + AUDIT_WATCH_INS = 0x3ef + AUDIT_WATCH_LIST = 0x3f1 + AUDIT_WATCH_REM = 0x3f0 AUTOFS_SUPER_MAGIC = 0x187 B0 = 0x0 B110 = 0x3 @@ -184,6 +484,7 @@ const ( BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_KPROBE_MULTI_RETURN = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 @@ -191,6 +492,8 @@ const ( BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 BPF_F_TEST_STATE_FREQ = 0x8 + BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 + BPF_F_XDP_HAS_FRAGS = 0x20 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -393,9 +696,11 @@ const ( CAP_SYS_TIME = 0x19 CAP_SYS_TTY_CONFIG = 0x1a CAP_WAKE_ALARM = 0x23 + CEPH_SUPER_MAGIC = 0xc36400 CFLUSH = 0xf CGROUP2_SUPER_MAGIC = 0x63677270 CGROUP_SUPER_MAGIC = 0x27e0eb + CIFS_SUPER_MAGIC = 0xff534d42 CLOCK_BOOTTIME = 0x7 CLOCK_BOOTTIME_ALARM = 0x9 CLOCK_DEFAULT = 0x0 @@ -515,9 +820,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2021-03-22)" + DM_VERSION_EXTRA = "-ioctl (2022-02-22)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x2d + DM_VERSION_MINOR = 0x2e DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -533,6 +838,55 @@ const ( EFD_SEMAPHORE = 0x1 EFIVARFS_MAGIC = 0xde5e81e4 EFS_SUPER_MAGIC = 0x414a53 + EM_386 = 0x3 + EM_486 = 0x6 + EM_68K = 0x4 + EM_860 = 0x7 + EM_88K = 0x5 + EM_AARCH64 = 0xb7 + EM_ALPHA = 0x9026 + EM_ALTERA_NIOS2 = 0x71 + EM_ARCOMPACT = 0x5d + EM_ARCV2 = 0xc3 + EM_ARM = 0x28 + EM_BLACKFIN = 0x6a + EM_BPF = 0xf7 + EM_CRIS = 0x4c + EM_CSKY = 0xfc + EM_CYGNUS_M32R = 0x9041 + EM_CYGNUS_MN10300 = 0xbeef + EM_FRV = 0x5441 + EM_H8_300 = 0x2e + EM_HEXAGON = 0xa4 + EM_IA_64 = 0x32 + EM_LOONGARCH = 0x102 + EM_M32 = 0x1 + EM_M32R = 0x58 + EM_MICROBLAZE = 0xbd + EM_MIPS = 0x8 + EM_MIPS_RS3_LE = 0xa + EM_MIPS_RS4_BE = 0xa + EM_MN10300 = 0x59 + EM_NDS32 = 0xa7 + EM_NONE = 0x0 + EM_OPENRISC = 0x5c + EM_PARISC = 0xf + EM_PPC = 0x14 + EM_PPC64 = 0x15 + EM_RISCV = 0xf3 + EM_S390 = 0x16 + EM_S390_OLD = 0xa390 + EM_SH = 0x2a + EM_SPARC = 0x2 + EM_SPARC32PLUS = 0x12 + EM_SPARCV9 = 0x2b + EM_SPU = 0x17 + EM_TILEGX = 0xbf + EM_TILEPRO = 0xbc + EM_TI_C6000 = 0x8c + EM_UNICORE = 0x6e + EM_X86_64 = 0x3e + EM_XTENSA = 0x5e ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -710,6 +1064,7 @@ const ( ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be ETH_P_ERSPAN2 = 0x22eb + ETH_P_ETHERCAT = 0x88a4 ETH_P_FCOE = 0x8906 ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 @@ -747,6 +1102,7 @@ const ( ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 ETH_P_PREAUTH = 0x88c7 + ETH_P_PROFINET = 0x8892 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -784,6 +1140,7 @@ const ( EV_SYN = 0x0 EV_VERSION = 0x10001 EXABYTE_ENABLE_NEST = 0xf0 + EXFAT_SUPER_MAGIC = 0x2011bab0 EXT2_SUPER_MAGIC = 0xef53 EXT3_SUPER_MAGIC = 0xef53 EXT4_SUPER_MAGIC = 0xef53 @@ -826,12 +1183,15 @@ const ( FAN_EVENT_INFO_TYPE_DFID_NAME = 
0x2 FAN_EVENT_INFO_TYPE_ERROR = 0x5 FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc + FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa FAN_EVENT_INFO_TYPE_PIDFD = 0x4 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_FS_ERROR = 0x8000 FAN_MARK_ADD = 0x1 FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_EVICTABLE = 0x200 FAN_MARK_FILESYSTEM = 0x100 FAN_MARK_FLUSH = 0x80 FAN_MARK_IGNORED_MASK = 0x20 @@ -854,17 +1214,27 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 + FAN_RENAME = 0x10000000 FAN_REPORT_DFID_NAME = 0xc00 + FAN_REPORT_DFID_NAME_TARGET = 0x1e00 FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FID = 0x200 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 + FAN_REPORT_TARGET_FID = 0x1000 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 + FIB_RULE_DEV_DETACHED = 0x8 + FIB_RULE_FIND_SADDR = 0x10000 + FIB_RULE_IIF_DETACHED = 0x8 + FIB_RULE_INVERT = 0x2 + FIB_RULE_OIF_DETACHED = 0x10 + FIB_RULE_PERMANENT = 0x1 + FIB_RULE_UNRESOLVED = 0x4 FIDEDUPERANGE = 0xc0189436 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" @@ -927,6 +1297,7 @@ const ( FS_VERITY_METADATA_TYPE_DESCRIPTOR = 0x2 FS_VERITY_METADATA_TYPE_MERKLE_TREE = 0x1 FS_VERITY_METADATA_TYPE_SIGNATURE = 0x3 + FUSE_SUPER_MAGIC = 0x65735546 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -1039,7 +1410,7 @@ const ( IFA_F_STABLE_PRIVACY = 0x800 IFA_F_TEMPORARY = 0x1 IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa + IFA_MAX = 0xb IFF_ALLMULTI = 0x200 IFF_ATTACH_QUEUE = 0x200 IFF_AUTOMEDIA = 0x4000 @@ -1294,6 +1665,7 @@ const ( KEXEC_ARCH_ARM = 0x280000 KEXEC_ARCH_DEFAULT = 0x0 KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_LOONGARCH = 0x1020000 KEXEC_ARCH_MASK = 0xffff0000 KEXEC_ARCH_MIPS = 0x80000 KEXEC_ARCH_MIPS_LE = 0xa0000 @@ -1386,6 +1758,7 @@ const ( LANDLOCK_ACCESS_FS_MAKE_SYM = 0x1000 LANDLOCK_ACCESS_FS_READ_DIR = 0x8 LANDLOCK_ACCESS_FS_READ_FILE = 0x4 + LANDLOCK_ACCESS_FS_REFER = 0x2000 LANDLOCK_ACCESS_FS_REMOVE_DIR = 0x10 LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 @@ -1495,6 +1868,7 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 MOUNT_ATTR_IDMAP = 0x100000 @@ -1740,6 +2114,7 @@ const ( NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 + NLM_F_BULK = 0x200 NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 @@ -1849,6 +2224,9 @@ const ( PERF_MEM_BLK_NA = 0x1 PERF_MEM_BLK_SHIFT = 0x28 PERF_MEM_HOPS_0 = 0x1 + PERF_MEM_HOPS_1 = 0x2 + PERF_MEM_HOPS_2 = 0x3 + PERF_MEM_HOPS_3 = 0x4 PERF_MEM_HOPS_SHIFT = 0x2b PERF_MEM_LOCK_LOCKED = 0x2 PERF_MEM_LOCK_NA = 0x1 @@ -2052,6 +2430,13 @@ const ( PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SET_VMA = 0x53564d41 + PR_SET_VMA_ANON_NAME = 0x0 + PR_SME_GET_VL = 0x40 + PR_SME_SET_VL = 0x3f + PR_SME_SET_VL_ONEXEC = 0x40000 + PR_SME_VL_INHERIT = 0x20000 + PR_SME_VL_LEN_MASK = 0xffff PR_SPEC_DISABLE = 0x4 PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 @@ -2204,8 +2589,9 @@ const ( RTC_FEATURE_ALARM = 0x0 RTC_FEATURE_ALARM_RES_2S = 0x3 RTC_FEATURE_ALARM_RES_MINUTE = 0x1 + RTC_FEATURE_ALARM_WAKEUP_ONLY = 0x7 RTC_FEATURE_BACKUP_SWITCH_MODE = 0x6 - RTC_FEATURE_CNT = 0x7 + RTC_FEATURE_CNT = 0x8 RTC_FEATURE_CORRECTION = 0x5 RTC_FEATURE_NEED_WEEK_DAY = 0x2 RTC_FEATURE_UPDATE_INTERRUPT = 0x4 @@ -2279,6 +2665,7 @@ const ( RTM_DELRULE = 0x21 RTM_DELTCLASS = 0x29 
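The FAN_* additions in this hunk extend the fanotify flag set; x/sys/unix exposes the syscalls as FanotifyInit and FanotifyMark. A minimal Linux-only sketch, assuming FAN_CLASS_NOTIF, FAN_CLOEXEC, and FAN_OPEN from the same package (only some FAN_* names appear in the excerpt) and "/tmp" as a placeholder path:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// fanotify_init requires CAP_SYS_ADMIN; expect EPERM otherwise.
	fd, err := unix.FanotifyInit(unix.FAN_CLASS_NOTIF|unix.FAN_CLOEXEC, unix.O_RDONLY)
	if err != nil {
		fmt.Println("fanotify_init:", err)
		return
	}
	defer unix.Close(fd)

	// Watch /tmp for open events; reading and decoding the
	// fanotify_event_metadata stream from fd is omitted here.
	if err := unix.FanotifyMark(fd, unix.FAN_MARK_ADD, unix.FAN_OPEN, unix.AT_FDCWD, "/tmp"); err != nil {
		fmt.Println("fanotify_mark:", err)
		return
	}
	fmt.Println("fanotify mark added on /tmp")
}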
RTM_DELTFILTER = 0x2d + RTM_DELTUNNEL = 0x79 RTM_DELVLAN = 0x71 RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 @@ -2311,8 +2698,9 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e + RTM_GETTUNNEL = 0x7a RTM_GETVLAN = 0x72 - RTM_MAX = 0x77 + RTM_MAX = 0x7b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -2336,11 +2724,13 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x1a - RTM_NR_MSGTYPES = 0x68 + RTM_NEWTUNNEL = 0x78 + RTM_NR_FAMILIES = 0x1b + RTM_NR_MSGTYPES = 0x6c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 + RTM_SETSTATS = 0x5f RTNH_ALIGNTO = 0x4 RTNH_COMPARE_MASK = 0x59 RTNH_F_DEAD = 0x1 @@ -2509,6 +2899,7 @@ const ( SMART_STATUS = 0xda SMART_WRITE_LOG_SECTOR = 0xd6 SMART_WRITE_THRESHOLDS = 0xd7 + SMB2_SUPER_MAGIC = 0xfe534d42 SMB_SUPER_MAGIC = 0x517b SOCKFS_MAGIC = 0x534f434b SOCK_BUF_LOCK_MASK = 0x3 @@ -2520,6 +2911,9 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_SNDBUF_LOCK = 0x1 + SOCK_TXREHASH_DEFAULT = 0xff + SOCK_TXREHASH_DISABLED = 0x0 + SOCK_TXREHASH_ENABLED = 0x1 SOL_AAL = 0x109 SOL_ALG = 0x117 SOL_ATM = 0x108 @@ -2535,6 +2929,8 @@ const ( SOL_IUCV = 0x115 SOL_KCM = 0x119 SOL_LLC = 0x10c + SOL_MCTP = 0x11d + SOL_MPTCP = 0x11c SOL_NETBEUI = 0x10b SOL_NETLINK = 0x10e SOL_NFC = 0x118 @@ -2544,6 +2940,7 @@ const ( SOL_RAW = 0xff SOL_RDS = 0x114 SOL_RXRPC = 0x110 + SOL_SMC = 0x11e SOL_TCP = 0x6 SOL_TIPC = 0x10f SOL_TLS = 0x11a @@ -2650,7 +3047,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xa + TASKSTATS_VERSION = 0xd TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 234fd4a5d..36c0dfc7c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include -m32 +// mkerrors.sh -Wall -Werror -static -I/tmp/386/include -m32 // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux // +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/386/include -m32 _const.go package unix @@ -326,6 +326,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -350,6 +351,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 58619b758..4ff942703 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include -m64 +// mkerrors.sh -Wall -Werror -static -I/tmp/amd64/include -m64 // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux // +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index 58619b758..4ff942703 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -1,11 +1,11 @@
-// mkerrors.sh -Wall -Werror -static -I/tmp/include -m64
+// mkerrors.sh -Wall -Werror -static -I/tmp/amd64/include -m64
 // Code generated by the command above; see README.md. DO NOT EDIT.

 //go:build amd64 && linux
 // +build amd64,linux

 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/amd64/include -m64 _const.go

 package unix

@@ -327,6 +327,7 @@ const (
 SO_RCVBUF = 0x8
 SO_RCVBUFFORCE = 0x21
 SO_RCVLOWAT = 0x12
+ SO_RCVMARK = 0x4b
 SO_RCVTIMEO = 0x14
 SO_RCVTIMEO_NEW = 0x42
 SO_RCVTIMEO_OLD = 0x14
@@ -351,6 +352,7 @@ const (
 SO_TIMESTAMPNS_NEW = 0x40
 SO_TIMESTAMPNS_OLD = 0x23
 SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
 SO_TXTIME = 0x3d
 SO_TYPE = 0x3
 SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index 3a64ff59d..3eaa0fb78 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -1,11 +1,11 @@
-// mkerrors.sh -Wall -Werror -static -I/tmp/include
+// mkerrors.sh -Wall -Werror -static -I/tmp/arm/include
 // Code generated by the command above; see README.md. DO NOT EDIT.

 //go:build arm && linux
 // +build arm,linux

 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/arm/include _const.go

 package unix

@@ -333,6 +333,7 @@ const (
 SO_RCVBUF = 0x8
 SO_RCVBUFFORCE = 0x21
 SO_RCVLOWAT = 0x12
+ SO_RCVMARK = 0x4b
 SO_RCVTIMEO = 0x14
 SO_RCVTIMEO_NEW = 0x42
 SO_RCVTIMEO_OLD = 0x14
@@ -357,6 +358,7 @@ const (
 SO_TIMESTAMPNS_NEW = 0x40
 SO_TIMESTAMPNS_OLD = 0x23
 SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
 SO_TXTIME = 0x3d
 SO_TYPE = 0x3
 SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index abe0b9257..d7995bdc3 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -1,11 +1,11 @@
-// mkerrors.sh -Wall -Werror -static -I/tmp/include -fsigned-char
+// mkerrors.sh -Wall -Werror -static -I/tmp/arm64/include -fsigned-char
 // Code generated by the command above; see README.md. DO NOT EDIT.

 //go:build arm64 && linux
 // +build arm64,linux

 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char _const.go

 package unix

@@ -323,6 +323,7 @@ const (
 SO_RCVBUF = 0x8
 SO_RCVBUFFORCE = 0x21
 SO_RCVLOWAT = 0x12
+ SO_RCVMARK = 0x4b
 SO_RCVTIMEO = 0x14
 SO_RCVTIMEO_NEW = 0x42
 SO_RCVTIMEO_OLD = 0x14
@@ -347,6 +348,7 @@ const (
 SO_TIMESTAMPNS_NEW = 0x40
 SO_TIMESTAMPNS_OLD = 0x23
 SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
 SO_TXTIME = 0x3d
 SO_TYPE = 0x3
 SO_WIFI_STATUS = 0x29
@@ -511,6 +513,7 @@ const (
 WORDSIZE = 0x40
 XCASE = 0x4
 XTABS = 0x1800
+ ZA_MAGIC = 0x54366345
 _HIDIOCGRAWNAME = 0x80804804
 _HIDIOCGRAWPHYS = 0x80404805
 _HIDIOCGRAWUNIQ = 0x80404808
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
new file mode 100644
index 000000000..928e24c20
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -0,0 +1,818 @@
+// mkerrors.sh -Wall -Werror -static -I/tmp/loong64/include
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+//go:build loong64 && linux
+// +build loong64,linux
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs -- -Wall -Werror -static -I/tmp/loong64/include _const.go
+
+package unix
+
+import "syscall"
+
+const (
+ B1000000 = 0x1008
+ B115200 = 0x1002
+ B1152000 = 0x1009
+ B1500000 = 0x100a
+ B2000000 = 0x100b
+ B230400 = 0x1003
+ B2500000 = 0x100c
+ B3000000 = 0x100d
+ B3500000 = 0x100e
+ B4000000 = 0x100f
+ B460800 = 0x1004
+ B500000 = 0x1005
+ B57600 = 0x1001
+ B576000 = 0x1006
+ B921600 = 0x1007
+ BLKBSZGET = 0x80081270
+ BLKBSZSET = 0x40081271
+ BLKFLSBUF = 0x1261
+ BLKFRAGET = 0x1265
+ BLKFRASET = 0x1264
+ BLKGETSIZE = 0x1260
+ BLKGETSIZE64 = 0x80081272
+ BLKPBSZGET = 0x127b
+ BLKRAGET = 0x1263
+ BLKRASET = 0x1262
+ BLKROGET = 0x125e
+ BLKROSET = 0x125d
+ BLKRRPART = 0x125f
+ BLKSECTGET = 0x1267
+ BLKSECTSET = 0x1266
+ BLKSSZGET = 0x1268
+ BOTHER = 0x1000
+ BS1 = 0x2000
+ BSDLY = 0x2000
+ CBAUD = 0x100f
+ CBAUDEX = 0x1000
+ CIBAUD = 0x100f0000
+ CLOCAL = 0x800
+ CR1 = 0x200
+ CR2 = 0x400
+ CR3 = 0x600
+ CRDLY = 0x600
+ CREAD = 0x80
+ CS6 = 0x10
+ CS7 = 0x20
+ CS8 = 0x30
+ CSIZE = 0x30
+ CSTOPB = 0x40
+ ECCGETLAYOUT = 0x81484d11
+ ECCGETSTATS = 0x80104d12
+ ECHOCTL = 0x200
+ ECHOE = 0x10
+ ECHOK = 0x20
+ ECHOKE = 0x800
+ ECHONL = 0x40
+ ECHOPRT = 0x400
+ EFD_CLOEXEC = 0x80000
+ EFD_NONBLOCK = 0x800
+ EPOLL_CLOEXEC = 0x80000
+ EXTPROC = 0x10000
+ FF1 = 0x8000
+ FFDLY = 0x8000
+ FICLONE = 0x40049409
+ FICLONERANGE = 0x4020940d
+ FLUSHO = 0x1000
+ FPU_CTX_MAGIC = 0x46505501
+ FS_IOC_ENABLE_VERITY = 0x40806685
+ FS_IOC_GETFLAGS = 0x80086601
+ FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
+ FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
+ FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
+ FS_IOC_SETFLAGS = 0x40086602
+ FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
+ F_GETLK = 0x5
+ F_GETLK64 = 0x5
+ F_GETOWN = 0x9
+ F_RDLCK = 0x0
+ F_SETLK = 0x6
+ F_SETLK64 = 0x6
+ F_SETLKW = 0x7
+ F_SETLKW64 = 0x7
+ F_SETOWN = 0x8
+ F_UNLCK = 0x2
+ F_WRLCK = 0x1
+ HIDIOCGRAWINFO = 0x80084803
+ HIDIOCGRDESC = 0x90044802
+ HIDIOCGRDESCSIZE = 0x80044801
+ HUPCL = 0x400
+ ICANON = 0x2
+ IEXTEN = 0x8000
+ IN_CLOEXEC = 0x80000
+ IN_NONBLOCK = 0x800
+ IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ ISIG = 0x1
+ IUCLC = 0x200
+ IXOFF = 0x1000
+ IXON = 0x400
+ MAP_ANON = 0x20
+ MAP_ANONYMOUS = 0x20
+ MAP_DENYWRITE = 0x800
+ MAP_EXECUTABLE = 0x1000
+ MAP_GROWSDOWN = 0x100
+ MAP_HUGETLB = 0x40000
+ MAP_LOCKED = 0x2000
+ MAP_NONBLOCK = 0x10000
+ MAP_NORESERVE = 0x4000
+ MAP_POPULATE = 0x8000
+ MAP_STACK = 0x20000
+ MAP_SYNC = 0x80000
+ MCL_CURRENT = 0x1
+ MCL_FUTURE = 0x2
+ MCL_ONFAULT = 0x4
+ MEMERASE = 0x40084d02
+ MEMERASE64 = 0x40104d14
+ MEMGETBADBLOCK = 0x40084d0b
+ MEMGETINFO = 0x80204d01
+ MEMGETOOBSEL = 0x80c84d0a
+ MEMGETREGIONCOUNT = 0x80044d07
+ MEMISLOCKED = 0x80084d17
+ MEMLOCK = 0x40084d05
+ MEMREADOOB = 0xc0104d04
+ MEMSETBADBLOCK = 0x40084d0c
+ MEMUNLOCK = 0x40084d06
+ MEMWRITEOOB = 0xc0104d03
+ MTDFILEMODE = 0x4d13
+ NFDBITS = 0x40
+ NLDLY = 0x100
+ NOFLSH = 0x80
+ NS_GET_NSTYPE = 0xb703
+ NS_GET_OWNER_UID = 0xb704
+ NS_GET_PARENT = 0xb702
+ NS_GET_USERNS = 0xb701
+ OLCUC = 0x2
+ ONLCR = 0x4
+ OTPERASE = 0x400c4d19
+ OTPGETREGIONCOUNT = 0x40044d0e
+ OTPGETREGIONINFO = 0x400c4d0f
+ OTPLOCK = 0x800c4d10
+ OTPSELECT = 0x80044d0d
+ O_APPEND = 0x400
+ O_ASYNC = 0x2000
+ O_CLOEXEC = 0x80000
+ O_CREAT = 0x40
+ O_DIRECT = 0x4000
+ O_DIRECTORY = 0x10000
+ O_DSYNC = 0x1000
+ O_EXCL = 0x80
+ O_FSYNC = 0x101000
+ O_LARGEFILE = 0x0
+ O_NDELAY = 0x800
+ O_NOATIME = 0x40000
+ O_NOCTTY = 0x100
+ O_NOFOLLOW = 0x20000
+ O_NONBLOCK = 0x800
+ O_PATH = 0x200000
+ O_RSYNC = 0x101000
+ O_SYNC = 0x101000
+ O_TMPFILE = 0x410000
+ O_TRUNC = 0x200
+ PARENB = 0x100
+ PARODD = 0x200
+ PENDIN = 0x4000
+ PERF_EVENT_IOC_DISABLE = 0x2401
+ PERF_EVENT_IOC_ENABLE = 0x2400
+ PERF_EVENT_IOC_ID = 0x80082407
+ PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b
+ PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409
+ PERF_EVENT_IOC_PERIOD = 0x40082404
+ PERF_EVENT_IOC_QUERY_BPF = 0xc008240a
+ PERF_EVENT_IOC_REFRESH = 0x2402
+ PERF_EVENT_IOC_RESET = 0x2403
+ PERF_EVENT_IOC_SET_BPF = 0x40042408
+ PERF_EVENT_IOC_SET_FILTER = 0x40082406
+ PERF_EVENT_IOC_SET_OUTPUT = 0x2405
+ PPPIOCATTACH = 0x4004743d
+ PPPIOCATTCHAN = 0x40047438
+ PPPIOCBRIDGECHAN = 0x40047435
+ PPPIOCCONNECT = 0x4004743a
+ PPPIOCDETACH = 0x4004743c
+ PPPIOCDISCONN = 0x7439
+ PPPIOCGASYNCMAP = 0x80047458
+ PPPIOCGCHAN = 0x80047437
+ PPPIOCGDEBUG = 0x80047441
+ PPPIOCGFLAGS = 0x8004745a
+ PPPIOCGIDLE = 0x8010743f
+ PPPIOCGIDLE32 = 0x8008743f
+ PPPIOCGIDLE64 = 0x8010743f
+ PPPIOCGL2TPSTATS = 0x80487436
+ PPPIOCGMRU = 0x80047453
+ PPPIOCGRASYNCMAP = 0x80047455
+ PPPIOCGUNIT = 0x80047456
+ PPPIOCGXASYNCMAP = 0x80207450
+ PPPIOCSACTIVE = 0x40107446
+ PPPIOCSASYNCMAP = 0x40047457
+ PPPIOCSCOMPRESS = 0x4010744d
+ PPPIOCSDEBUG = 0x40047440
+ PPPIOCSFLAGS = 0x40047459
+ PPPIOCSMAXCID = 0x40047451
+ PPPIOCSMRRU = 0x4004743b
+ PPPIOCSMRU = 0x40047452
+ PPPIOCSNPMODE = 0x4008744b
+ PPPIOCSPASS = 0x40107447
+ PPPIOCSRASYNCMAP = 0x40047454
+ PPPIOCSXASYNCMAP = 0x4020744f
+ PPPIOCUNBRIDGECHAN = 0x7434
+ PPPIOCXFERUNIT = 0x744e
+ PR_SET_PTRACER_ANY = 0xffffffffffffffff
+ PTRACE_SYSEMU = 0x1f
+ PTRACE_SYSEMU_SINGLESTEP = 0x20
+ RLIMIT_AS = 0x9
+ RLIMIT_MEMLOCK = 0x8
+ RLIMIT_NOFILE = 0x7
+ RLIMIT_NPROC = 0x6
+ RLIMIT_RSS = 0x5
+ RNDADDENTROPY = 0x40085203
+ RNDADDTOENTCNT = 0x40045201
+ RNDCLEARPOOL = 0x5206
+ RNDGETENTCNT = 0x80045200
+ RNDGETPOOL = 0x80085202
+ RNDRESEEDCRNG = 0x5207
+ RNDZAPENTCNT = 0x5204
+ RTC_AIE_OFF = 0x7002
+ RTC_AIE_ON = 0x7001
+ RTC_ALM_READ = 0x80247008
+ RTC_ALM_SET = 0x40247007
+ RTC_EPOCH_READ = 0x8008700d
+ RTC_EPOCH_SET = 0x4008700e
+ RTC_IRQP_READ = 0x8008700b
+ RTC_IRQP_SET = 0x4008700c
+ RTC_PARAM_GET = 0x40187013
+ RTC_PARAM_SET = 0x40187014
+ RTC_PIE_OFF = 0x7006
+ RTC_PIE_ON = 0x7005
+ RTC_PLL_GET = 0x80207011
+ RTC_PLL_SET = 0x40207012
+ RTC_RD_TIME = 0x80247009
+ RTC_SET_TIME = 0x4024700a
+ RTC_UIE_OFF = 0x7004
+ RTC_UIE_ON = 0x7003
+ RTC_VL_CLR = 0x7014
+ RTC_VL_READ = 0x80047013
+ RTC_WIE_OFF = 0x7010
+ RTC_WIE_ON = 0x700f
+ RTC_WKALM_RD = 0x80287010
+ RTC_WKALM_SET = 0x4028700f
+ SCM_TIMESTAMPING = 0x25
+ SCM_TIMESTAMPING_OPT_STATS = 0x36
+ SCM_TIMESTAMPING_PKTINFO = 0x3a
+ SCM_TIMESTAMPNS = 0x23
+ SCM_TXTIME = 0x3d
+ SCM_WIFI_STATUS = 0x29
+ SFD_CLOEXEC = 0x80000
+ SFD_NONBLOCK = 0x800
+ SIOCATMARK = 0x8905
+ SIOCGPGRP = 0x8904
+ SIOCGSTAMPNS_NEW = 0x80108907
+ SIOCGSTAMP_NEW = 0x80108906
+ SIOCINQ = 0x541b
+ SIOCOUTQ = 0x5411
+ SIOCSPGRP = 0x8902
+ SOCK_CLOEXEC = 0x80000
+ SOCK_DGRAM = 0x2
+ SOCK_NONBLOCK = 0x800
+ SOCK_STREAM = 0x1
+ SOL_SOCKET = 0x1
+ SO_ACCEPTCONN = 0x1e
+ SO_ATTACH_BPF = 0x32
+ SO_ATTACH_REUSEPORT_CBPF = 0x33
+ SO_ATTACH_REUSEPORT_EBPF = 0x34
+ SO_BINDTODEVICE = 0x19
+ SO_BINDTOIFINDEX = 0x3e
+ SO_BPF_EXTENSIONS = 0x30
+ SO_BROADCAST = 0x6
+ SO_BSDCOMPAT = 0xe
+ SO_BUF_LOCK = 0x48
+ SO_BUSY_POLL = 0x2e
+ SO_BUSY_POLL_BUDGET = 0x46
+ SO_CNX_ADVICE = 0x35
+ SO_COOKIE = 0x39
+ SO_DETACH_REUSEPORT_BPF = 0x44
+ SO_DOMAIN = 0x27
+ SO_DONTROUTE = 0x5
+ SO_ERROR = 0x4
+ SO_INCOMING_CPU = 0x31
+ SO_INCOMING_NAPI_ID = 0x38
+ SO_KEEPALIVE = 0x9
+ SO_LINGER = 0xd
+ SO_LOCK_FILTER = 0x2c
+ SO_MARK = 0x24
+ SO_MAX_PACING_RATE = 0x2f
+ SO_MEMINFO = 0x37
+ SO_NETNS_COOKIE = 0x47
+ SO_NOFCS = 0x2b
+ SO_OOBINLINE = 0xa
+ SO_PASSCRED = 0x10
+ SO_PASSSEC = 0x22
+ SO_PEEK_OFF = 0x2a
+ SO_PEERCRED = 0x11
+ SO_PEERGROUPS = 0x3b
+ SO_PEERSEC = 0x1f
+ SO_PREFER_BUSY_POLL = 0x45
+ SO_PROTOCOL = 0x26
+ SO_RCVBUF = 0x8
+ SO_RCVBUFFORCE = 0x21
+ SO_RCVLOWAT = 0x12
+ SO_RCVMARK = 0x4b
+ SO_RCVTIMEO = 0x14
+ SO_RCVTIMEO_NEW = 0x42
+ SO_RCVTIMEO_OLD = 0x14
+ SO_RESERVE_MEM = 0x49
+ SO_REUSEADDR = 0x2
+ SO_REUSEPORT = 0xf
+ SO_RXQ_OVFL = 0x28
+ SO_SECURITY_AUTHENTICATION = 0x16
+ SO_SECURITY_ENCRYPTION_NETWORK = 0x18
+ SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17
+ SO_SELECT_ERR_QUEUE = 0x2d
+ SO_SNDBUF = 0x7
+ SO_SNDBUFFORCE = 0x20
+ SO_SNDLOWAT = 0x13
+ SO_SNDTIMEO = 0x15
+ SO_SNDTIMEO_NEW = 0x43
+ SO_SNDTIMEO_OLD = 0x15
+ SO_TIMESTAMPING = 0x25
+ SO_TIMESTAMPING_NEW = 0x41
+ SO_TIMESTAMPING_OLD = 0x25
+ SO_TIMESTAMPNS = 0x23
+ SO_TIMESTAMPNS_NEW = 0x40
+ SO_TIMESTAMPNS_OLD = 0x23
+ SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
+ SO_TXTIME = 0x3d
+ SO_TYPE = 0x3
+ SO_WIFI_STATUS = 0x29
+ SO_ZEROCOPY = 0x3c
+ TAB1 = 0x800
+ TAB2 = 0x1000
+ TAB3 = 0x1800
+ TABDLY = 0x1800
+ TCFLSH = 0x540b
+ TCGETA = 0x5405
+ TCGETS = 0x5401
+ TCGETS2 = 0x802c542a
+ TCGETX = 0x5432
+ TCSAFLUSH = 0x2
+ TCSBRK = 0x5409
+ TCSBRKP = 0x5425
+ TCSETA = 0x5406
+ TCSETAF = 0x5408
+ TCSETAW = 0x5407
+ TCSETS = 0x5402
+ TCSETS2 = 0x402c542b
+ TCSETSF = 0x5404
+ TCSETSF2 = 0x402c542d
+ TCSETSW = 0x5403
+ TCSETSW2 = 0x402c542c
+ TCSETX = 0x5433
+ TCSETXF = 0x5434
+ TCSETXW = 0x5435
+ TCXONC = 0x540a
+ TFD_CLOEXEC = 0x80000
+ TFD_NONBLOCK = 0x800
+ TIOCCBRK = 0x5428
+ TIOCCONS = 0x541d
+ TIOCEXCL = 0x540c
+ TIOCGDEV = 0x80045432
+ TIOCGETD = 0x5424
+ TIOCGEXCL = 0x80045440
+ TIOCGICOUNT = 0x545d
+ TIOCGISO7816 = 0x80285442
+ TIOCGLCKTRMIOS = 0x5456
+ TIOCGPGRP = 0x540f
+ TIOCGPKT = 0x80045438
+ TIOCGPTLCK = 0x80045439
+ TIOCGPTN = 0x80045430
+ TIOCGPTPEER = 0x5441
+ TIOCGRS485 = 0x542e
+ TIOCGSERIAL = 0x541e
+ TIOCGSID = 0x5429
+ TIOCGSOFTCAR = 0x5419
+ TIOCGWINSZ = 0x5413
+ TIOCINQ = 0x541b
+ TIOCLINUX = 0x541c
+ TIOCMBIC = 0x5417
+ TIOCMBIS = 0x5416
+ TIOCMGET = 0x5415
+ TIOCMIWAIT = 0x545c
+ TIOCMSET = 0x5418
+ TIOCM_CAR = 0x40
+ TIOCM_CD = 0x40
+ TIOCM_CTS = 0x20
+ TIOCM_DSR = 0x100
+ TIOCM_RI = 0x80
+ TIOCM_RNG = 0x80
+ TIOCM_SR = 0x10
+ TIOCM_ST = 0x8
+ TIOCNOTTY = 0x5422
+ TIOCNXCL = 0x540d
+ TIOCOUTQ = 0x5411
+ TIOCPKT = 0x5420
+ TIOCSBRK = 0x5427
+ TIOCSCTTY = 0x540e
+ TIOCSERCONFIG = 0x5453
+ TIOCSERGETLSR = 0x5459
+ TIOCSERGETMULTI = 0x545a
+ TIOCSERGSTRUCT = 0x5458
+ TIOCSERGWILD = 0x5454
+ TIOCSERSETMULTI = 0x545b
+ TIOCSERSWILD = 0x5455
+ TIOCSER_TEMT = 0x1
+ TIOCSETD = 0x5423
+ TIOCSIG = 0x40045436
+ TIOCSISO7816 = 0xc0285443
+ TIOCSLCKTRMIOS = 0x5457
+ TIOCSPGRP = 0x5410
+ TIOCSPTLCK = 0x40045431
+ TIOCSRS485 = 0x542f
+ TIOCSSERIAL = 0x541f
+ TIOCSSOFTCAR = 0x541a
+ TIOCSTI = 0x5412
+ TIOCSWINSZ = 0x5414
+ TIOCVHANGUP = 0x5437
+ TOSTOP = 0x100
+ TUNATTACHFILTER = 0x401054d5
+ TUNDETACHFILTER = 0x401054d6
+ TUNGETDEVNETNS = 0x54e3
+ TUNGETFEATURES = 0x800454cf
+ TUNGETFILTER = 0x801054db
+ TUNGETIFF = 0x800454d2
+ TUNGETSNDBUF = 0x800454d3
+ TUNGETVNETBE = 0x800454df
+ TUNGETVNETHDRSZ = 0x800454d7
+ TUNGETVNETLE = 0x800454dd
+ TUNSETCARRIER = 0x400454e2
+ TUNSETDEBUG = 0x400454c9
+ TUNSETFILTEREBPF = 0x800454e1
+ TUNSETGROUP = 0x400454ce
+ TUNSETIFF = 0x400454ca
+ TUNSETIFINDEX = 0x400454da
+ TUNSETLINK = 0x400454cd
+ TUNSETNOCSUM = 0x400454c8
+ TUNSETOFFLOAD = 0x400454d0
+ TUNSETOWNER = 0x400454cc
+ TUNSETPERSIST = 0x400454cb
+ TUNSETQUEUE = 0x400454d9
+ TUNSETSNDBUF = 0x400454d4
+ TUNSETSTEERINGEBPF = 0x800454e0
+ TUNSETTXFILTER = 0x400454d1
+ TUNSETVNETBE = 0x400454de
+ TUNSETVNETHDRSZ = 0x400454d8
+ TUNSETVNETLE = 0x400454dc
+ UBI_IOCATT = 0x40186f40
+ UBI_IOCDET = 0x40046f41
+ UBI_IOCEBCH = 0x40044f02
+ UBI_IOCEBER = 0x40044f01
+ UBI_IOCEBISMAP = 0x80044f05
+ UBI_IOCEBMAP = 0x40084f03
+ UBI_IOCEBUNMAP = 0x40044f04
+ UBI_IOCMKVOL = 0x40986f00
+ UBI_IOCRMVOL = 0x40046f01
+ UBI_IOCRNVOL = 0x51106f03
+ UBI_IOCRPEB = 0x40046f04
+ UBI_IOCRSVOL = 0x400c6f02
+ UBI_IOCSETVOLPROP = 0x40104f06
+ UBI_IOCSPEB = 0x40046f05
+ UBI_IOCVOLCRBLK = 0x40804f07
+ UBI_IOCVOLRMBLK = 0x4f08
+ UBI_IOCVOLUP = 0x40084f00
+ VDISCARD = 0xd
+ VEOF = 0x4
+ VEOL = 0xb
+ VEOL2 = 0x10
+ VMIN = 0x6
+ VREPRINT = 0xc
+ VSTART = 0x8
+ VSTOP = 0x9
+ VSUSP = 0xa
+ VSWTC = 0x7
+ VT1 = 0x4000
+ VTDLY = 0x4000
+ VTIME = 0x5
+ VWERASE = 0xe
+ WDIOC_GETBOOTSTATUS = 0x80045702
+ WDIOC_GETPRETIMEOUT = 0x80045709
+ WDIOC_GETSTATUS = 0x80045701
+ WDIOC_GETSUPPORT = 0x80285700
+ WDIOC_GETTEMP = 0x80045703
+ WDIOC_GETTIMELEFT = 0x8004570a
+ WDIOC_GETTIMEOUT = 0x80045707
+ WDIOC_KEEPALIVE = 0x80045705
+ WDIOC_SETOPTIONS = 0x80045704
+ WORDSIZE = 0x40
+ XCASE = 0x4
+ XTABS = 0x1800
+ _HIDIOCGRAWNAME = 0x80804804
+ _HIDIOCGRAWPHYS = 0x80404805
+ _HIDIOCGRAWUNIQ = 0x80404808
+)
+
+// Errors
+const (
+ EADDRINUSE = syscall.Errno(0x62)
+ EADDRNOTAVAIL = syscall.Errno(0x63)
+ EADV = syscall.Errno(0x44)
+ EAFNOSUPPORT = syscall.Errno(0x61)
+ EALREADY = syscall.Errno(0x72)
+ EBADE = syscall.Errno(0x34)
+ EBADFD = syscall.Errno(0x4d)
+ EBADMSG = syscall.Errno(0x4a)
+ EBADR = syscall.Errno(0x35)
+ EBADRQC = syscall.Errno(0x38)
+ EBADSLT = syscall.Errno(0x39)
+ EBFONT = syscall.Errno(0x3b)
+ ECANCELED = syscall.Errno(0x7d)
+ ECHRNG = syscall.Errno(0x2c)
+ ECOMM = syscall.Errno(0x46)
+ ECONNABORTED = syscall.Errno(0x67)
+ ECONNREFUSED = syscall.Errno(0x6f)
+ ECONNRESET = syscall.Errno(0x68)
+ EDEADLK = syscall.Errno(0x23)
+ EDEADLOCK = syscall.Errno(0x23)
+ EDESTADDRREQ = syscall.Errno(0x59)
+ EDOTDOT = syscall.Errno(0x49)
+ EDQUOT = syscall.Errno(0x7a)
+ EHOSTDOWN = syscall.Errno(0x70)
+ EHOSTUNREACH = syscall.Errno(0x71)
+ EHWPOISON = syscall.Errno(0x85)
+ EIDRM = syscall.Errno(0x2b)
+ EILSEQ = syscall.Errno(0x54)
+ EINPROGRESS = syscall.Errno(0x73)
+ EISCONN = syscall.Errno(0x6a)
+ EISNAM = syscall.Errno(0x78)
+ EKEYEXPIRED = syscall.Errno(0x7f)
+ EKEYREJECTED = syscall.Errno(0x81)
+ EKEYREVOKED = syscall.Errno(0x80)
+ EL2HLT = syscall.Errno(0x33)
+ EL2NSYNC = syscall.Errno(0x2d)
+ EL3HLT = syscall.Errno(0x2e)
+ EL3RST = syscall.Errno(0x2f)
+ ELIBACC = syscall.Errno(0x4f)
+ ELIBBAD = syscall.Errno(0x50)
+ ELIBEXEC = syscall.Errno(0x53)
+ ELIBMAX = syscall.Errno(0x52)
+ ELIBSCN = syscall.Errno(0x51)
+ ELNRNG = syscall.Errno(0x30)
+ ELOOP = syscall.Errno(0x28)
+ EMEDIUMTYPE = syscall.Errno(0x7c)
+ EMSGSIZE = syscall.Errno(0x5a)
+ EMULTIHOP = syscall.Errno(0x48)
+ ENAMETOOLONG = syscall.Errno(0x24)
+ ENAVAIL = syscall.Errno(0x77)
+ ENETDOWN = syscall.Errno(0x64)
+ ENETRESET = syscall.Errno(0x66)
+ ENETUNREACH = syscall.Errno(0x65)
+ ENOANO = syscall.Errno(0x37)
+ ENOBUFS = syscall.Errno(0x69)
+ ENOCSI = syscall.Errno(0x32)
+ ENODATA = syscall.Errno(0x3d)
+ ENOKEY = syscall.Errno(0x7e)
+ ENOLCK = syscall.Errno(0x25)
+ ENOLINK = syscall.Errno(0x43)
+ ENOMEDIUM = syscall.Errno(0x7b)
+ ENOMSG = syscall.Errno(0x2a)
+ ENONET = syscall.Errno(0x40)
+ ENOPKG = syscall.Errno(0x41)
+ ENOPROTOOPT = syscall.Errno(0x5c)
+ ENOSR = syscall.Errno(0x3f)
+ ENOSTR = syscall.Errno(0x3c)
+ ENOSYS = syscall.Errno(0x26)
+ ENOTCONN = syscall.Errno(0x6b)
+ ENOTEMPTY = syscall.Errno(0x27)
+ ENOTNAM = syscall.Errno(0x76)
+ ENOTRECOVERABLE = syscall.Errno(0x83)
+ ENOTSOCK = syscall.Errno(0x58)
+ ENOTSUP = syscall.Errno(0x5f)
+ ENOTUNIQ = syscall.Errno(0x4c)
+ EOPNOTSUPP = syscall.Errno(0x5f)
+ EOVERFLOW = syscall.Errno(0x4b)
+ EOWNERDEAD = syscall.Errno(0x82)
+ EPFNOSUPPORT = syscall.Errno(0x60)
+ EPROTO = syscall.Errno(0x47)
+ EPROTONOSUPPORT = syscall.Errno(0x5d)
+ EPROTOTYPE = syscall.Errno(0x5b)
+ EREMCHG = syscall.Errno(0x4e)
+ EREMOTE = syscall.Errno(0x42)
+ EREMOTEIO = syscall.Errno(0x79)
+ ERESTART = syscall.Errno(0x55)
+ ERFKILL = syscall.Errno(0x84)
+ ESHUTDOWN = syscall.Errno(0x6c)
+ ESOCKTNOSUPPORT = syscall.Errno(0x5e)
+ ESRMNT = syscall.Errno(0x45)
+ ESTALE = syscall.Errno(0x74)
+ ESTRPIPE = syscall.Errno(0x56)
+ ETIME = syscall.Errno(0x3e)
+ ETIMEDOUT = syscall.Errno(0x6e)
+ ETOOMANYREFS = syscall.Errno(0x6d)
+ EUCLEAN = syscall.Errno(0x75)
+ EUNATCH = syscall.Errno(0x31)
+ EUSERS = syscall.Errno(0x57)
+ EXFULL = syscall.Errno(0x36)
+)
+
+// Signals
+const (
+ SIGBUS = syscall.Signal(0x7)
+ SIGCHLD = syscall.Signal(0x11)
+ SIGCLD = syscall.Signal(0x11)
+ SIGCONT = syscall.Signal(0x12)
+ SIGIO = syscall.Signal(0x1d)
+ SIGPOLL = syscall.Signal(0x1d)
+ SIGPROF = syscall.Signal(0x1b)
+ SIGPWR = syscall.Signal(0x1e)
+ SIGSTKFLT = syscall.Signal(0x10)
+ SIGSTOP = syscall.Signal(0x13)
+ SIGSYS = syscall.Signal(0x1f)
+ SIGTSTP = syscall.Signal(0x14)
+ SIGTTIN = syscall.Signal(0x15)
+ SIGTTOU = syscall.Signal(0x16)
+ SIGURG = syscall.Signal(0x17)
+ SIGUSR1 = syscall.Signal(0xa)
+ SIGUSR2 = syscall.Signal(0xc)
+ SIGVTALRM = syscall.Signal(0x1a)
+ SIGWINCH = syscall.Signal(0x1c)
+ SIGXCPU = syscall.Signal(0x18)
+ SIGXFSZ = syscall.Signal(0x19)
+)
+
+// Error table
+var errorList = [...]struct {
+ num syscall.Errno
+ name string
+ desc string
+}{
+ {1, "EPERM", "operation not permitted"},
+ {2, "ENOENT", "no such file or directory"},
+ {3, "ESRCH", "no such process"},
+ {4, "EINTR", "interrupted system call"},
+ {5, "EIO", "input/output error"},
+ {6, "ENXIO", "no such device or address"},
+ {7, "E2BIG", "argument list too long"},
+ {8, "ENOEXEC", "exec format error"},
+ {9, "EBADF", "bad file descriptor"},
+ {10, "ECHILD", "no child processes"},
+ {11, "EAGAIN", "resource temporarily unavailable"},
+ {12, "ENOMEM", "cannot allocate memory"},
+ {13, "EACCES", "permission denied"},
+ {14, "EFAULT", "bad address"},
+ {15, "ENOTBLK", "block device required"},
+ {16, "EBUSY", "device or resource busy"},
+ {17, "EEXIST", "file exists"},
+ {18, "EXDEV", "invalid cross-device link"},
+ {19, "ENODEV", "no such device"},
+ {20, "ENOTDIR", "not a directory"},
+ {21, "EISDIR", "is a directory"},
+ {22, "EINVAL", "invalid argument"},
+ {23, "ENFILE", "too many open files in system"},
+ {24, "EMFILE", "too many open files"},
+ {25, "ENOTTY", "inappropriate ioctl for device"},
+ {26, "ETXTBSY", "text file busy"},
+ {27, "EFBIG", "file too large"},
+ {28, "ENOSPC", "no space left on device"},
+ {29, "ESPIPE", "illegal seek"},
+ {30, "EROFS", "read-only file system"},
+ {31, "EMLINK", "too many links"},
+ {32, "EPIPE", "broken pipe"},
+ {33, "EDOM", "numerical argument out of domain"},
+ {34, "ERANGE", "numerical result out of range"},
+ {35, "EDEADLK", "resource deadlock avoided"},
+ {36, "ENAMETOOLONG", "file name too long"},
+ {37, "ENOLCK", "no locks available"},
+ {38, "ENOSYS", "function not implemented"},
+ {39, "ENOTEMPTY", "directory not empty"},
+ {40, "ELOOP", "too many levels of symbolic links"},
+ {42, "ENOMSG", "no message of desired type"},
+ {43, "EIDRM", "identifier removed"},
+ {44, "ECHRNG", "channel number out of range"},
+ {45, "EL2NSYNC", "level 2 not synchronized"},
+ {46, "EL3HLT", "level 3 halted"},
+ {47, "EL3RST", "level 3 reset"},
+ {48, "ELNRNG", "link number out of range"},
+ {49, "EUNATCH", "protocol driver not attached"},
+ {50, "ENOCSI", "no CSI structure available"},
+ {51, "EL2HLT", "level 2 halted"},
+ {52, "EBADE", "invalid exchange"},
+ {53, "EBADR", "invalid request descriptor"},
+ {54, "EXFULL", "exchange full"},
+ {55, "ENOANO", "no anode"},
+ {56, "EBADRQC", "invalid request code"},
+ {57, "EBADSLT", "invalid slot"},
+ {59, "EBFONT", "bad font file format"},
+ {60, "ENOSTR", "device not a stream"},
+ {61, "ENODATA", "no data available"},
+ {62, "ETIME", "timer expired"},
+ {63, "ENOSR", "out of streams resources"},
+ {64, "ENONET", "machine is not on the network"},
+ {65, "ENOPKG", "package not installed"},
+ {66, "EREMOTE", "object is remote"},
+ {67, "ENOLINK", "link has been severed"},
+ {68, "EADV", "advertise error"},
+ {69, "ESRMNT", "srmount error"},
+ {70, "ECOMM", "communication error on send"},
+ {71, "EPROTO", "protocol error"},
+ {72, "EMULTIHOP", "multihop attempted"},
+ {73, "EDOTDOT", "RFS specific error"},
+ {74, "EBADMSG", "bad message"},
+ {75, "EOVERFLOW", "value too large for defined data type"},
+ {76, "ENOTUNIQ", "name not unique on network"},
+ {77, "EBADFD", "file descriptor in bad state"},
+ {78, "EREMCHG", "remote address changed"},
+ {79, "ELIBACC", "can not access a needed shared library"},
+ {80, "ELIBBAD", "accessing a corrupted shared library"},
+ {81, "ELIBSCN", ".lib section in a.out corrupted"},
+ {82, "ELIBMAX", "attempting to link in too many shared libraries"},
+ {83, "ELIBEXEC", "cannot exec a shared library directly"},
+ {84, "EILSEQ", "invalid or incomplete multibyte or wide character"},
+ {85, "ERESTART", "interrupted system call should be restarted"},
+ {86, "ESTRPIPE", "streams pipe error"},
+ {87, "EUSERS", "too many users"},
+ {88, "ENOTSOCK", "socket operation on non-socket"},
+ {89, "EDESTADDRREQ", "destination address required"},
+ {90, "EMSGSIZE", "message too long"},
+ {91, "EPROTOTYPE", "protocol wrong type for socket"},
+ {92, "ENOPROTOOPT", "protocol not available"},
+ {93, "EPROTONOSUPPORT", "protocol not supported"},
+ {94, "ESOCKTNOSUPPORT", "socket type not supported"},
+ {95, "ENOTSUP", "operation not supported"},
+ {96, "EPFNOSUPPORT", "protocol family not supported"},
+ {97, "EAFNOSUPPORT", "address family not supported by protocol"},
+ {98, "EADDRINUSE", "address already in use"},
+ {99, "EADDRNOTAVAIL", "cannot assign requested address"},
+ {100, "ENETDOWN", "network is down"},
+ {101, "ENETUNREACH", "network is unreachable"},
+ {102, "ENETRESET", "network dropped connection on reset"},
+ {103, "ECONNABORTED", "software caused connection abort"},
+ {104, "ECONNRESET", "connection reset by peer"},
+ {105, "ENOBUFS", "no buffer space available"},
+ {106, "EISCONN", "transport endpoint is already connected"},
+ {107, "ENOTCONN", "transport endpoint is not connected"},
+ {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"},
+ {109, "ETOOMANYREFS", "too many references: cannot splice"},
+ {110, "ETIMEDOUT", "connection timed out"},
+ {111, "ECONNREFUSED", "connection refused"},
+ {112, "EHOSTDOWN", "host is down"},
+ {113, "EHOSTUNREACH", "no route to host"},
+ {114, "EALREADY", "operation already in progress"},
+ {115, "EINPROGRESS", "operation now in progress"},
+ {116, "ESTALE", "stale file handle"},
+ {117, "EUCLEAN", "structure needs cleaning"},
+ {118, "ENOTNAM", "not a XENIX named type file"},
+ {119, "ENAVAIL", "no XENIX semaphores available"},
+ {120, "EISNAM", "is a named type file"},
+ {121, "EREMOTEIO", "remote I/O error"},
+ {122, "EDQUOT", "disk quota exceeded"},
+ {123, "ENOMEDIUM", "no medium found"},
+ {124, "EMEDIUMTYPE", "wrong medium type"},
+ {125, "ECANCELED", "operation canceled"},
+ {126, "ENOKEY", "required key not available"},
+ {127, "EKEYEXPIRED", "key has expired"},
+ {128, "EKEYREVOKED", "key has been revoked"},
+ {129, "EKEYREJECTED", "key was rejected by service"},
+ {130, "EOWNERDEAD", "owner died"},
+ {131, "ENOTRECOVERABLE", "state not recoverable"},
+ {132, "ERFKILL", "operation not possible due to RF-kill"},
+ {133, "EHWPOISON", "memory page has hardware error"},
+}
+
+// Signal table
+var signalList = [...]struct {
+ num syscall.Signal
+ name string
+ desc string
+}{
+ {1, "SIGHUP", "hangup"},
+ {2, "SIGINT", "interrupt"},
+ {3, "SIGQUIT", "quit"},
+ {4, "SIGILL", "illegal instruction"},
+ {5, "SIGTRAP", "trace/breakpoint trap"},
+ {6, "SIGABRT", "aborted"},
+ {7, "SIGBUS", "bus error"},
+ {8, "SIGFPE", "floating point exception"},
+ {9, "SIGKILL", "killed"},
+ {10, "SIGUSR1", "user defined signal 1"},
+ {11, "SIGSEGV", "segmentation fault"},
+ {12, "SIGUSR2", "user defined signal 2"},
+ {13, "SIGPIPE", "broken pipe"},
+ {14, "SIGALRM", "alarm clock"},
+ {15, "SIGTERM", "terminated"},
+ {16, "SIGSTKFLT", "stack fault"},
+ {17, "SIGCHLD", "child exited"},
+ {18, "SIGCONT", "continued"},
+ {19, "SIGSTOP", "stopped (signal)"},
+ {20, "SIGTSTP", "stopped"},
+ {21, "SIGTTIN", "stopped (tty input)"},
+ {22, "SIGTTOU", "stopped (tty output)"},
+ {23, "SIGURG", "urgent I/O condition"},
+ {24, "SIGXCPU", "CPU time limit exceeded"},
+ {25, "SIGXFSZ", "file size limit exceeded"},
+ {26, "SIGVTALRM", "virtual timer expired"},
+ {27, "SIGPROF", "profiling timer expired"},
+ {28, "SIGWINCH", "window changed"},
+ {29, "SIGIO", "I/O possible"},
+ {30, "SIGPWR", "power failure"},
+ {31, "SIGSYS", "bad system call"},
+}
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index 14d7a8439..179bffb47 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -1,11 +1,11 @@
-// mkerrors.sh -Wall -Werror -static -I/tmp/include
+// mkerrors.sh -Wall -Werror -static -I/tmp/mips/include
 // Code generated by the command above; see README.md. DO NOT EDIT.

 //go:build mips && linux
 // +build mips,linux

 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/mips/include _const.go

 package unix

@@ -326,6 +326,7 @@ const (
 SO_RCVBUF = 0x1002
 SO_RCVBUFFORCE = 0x21
 SO_RCVLOWAT = 0x1004
+ SO_RCVMARK = 0x4b
 SO_RCVTIMEO = 0x1006
 SO_RCVTIMEO_NEW = 0x42
 SO_RCVTIMEO_OLD = 0x1006
@@ -351,6 +352,7 @@ const (
 SO_TIMESTAMPNS_NEW = 0x40
 SO_TIMESTAMPNS_OLD = 0x23
 SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
 SO_TXTIME = 0x3d
 SO_TYPE = 0x1008
 SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index 99e7c4ac0..1fba17bd7 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -1,11 +1,11 @@
-// mkerrors.sh -Wall -Werror -static -I/tmp/include
+// mkerrors.sh -Wall -Werror -static -I/tmp/mips64/include
 // Code generated by the command above; see README.md. DO NOT EDIT.

 //go:build mips64 && linux
 // +build mips64,linux

 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/mips64/include _const.go

 package unix

@@ -326,6 +326,7 @@ const (
 SO_RCVBUF = 0x1002
 SO_RCVBUFFORCE = 0x21
 SO_RCVLOWAT = 0x1004
+ SO_RCVMARK = 0x4b
 SO_RCVTIMEO = 0x1006
 SO_RCVTIMEO_NEW = 0x42
 SO_RCVTIMEO_OLD = 0x1006
@@ -351,6 +352,7 @@ const (
 SO_TIMESTAMPNS_NEW = 0x40
 SO_TIMESTAMPNS_OLD = 0x23
 SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
 SO_TXTIME = 0x3d
 SO_TYPE = 0x1008
 SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index 496364c33..b77dde315 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -1,11 +1,11 @@
-// mkerrors.sh -Wall -Werror -static -I/tmp/include
+// mkerrors.sh -Wall -Werror -static -I/tmp/mips64le/include
 // Code generated by the command above; see README.md. DO NOT EDIT.

 //go:build mips64le && linux
 // +build mips64le,linux

 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/mips64le/include _const.go

 package unix

@@ -326,6 +326,7 @@ const (
 SO_RCVBUF = 0x1002
 SO_RCVBUFFORCE = 0x21
 SO_RCVLOWAT = 0x1004
+ SO_RCVMARK = 0x4b
 SO_RCVTIMEO = 0x1006
 SO_RCVTIMEO_NEW = 0x42
 SO_RCVTIMEO_OLD = 0x1006
@@ -351,6 +352,7 @@ const (
 SO_TIMESTAMPNS_NEW = 0x40
 SO_TIMESTAMPNS_OLD = 0x23
 SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
 SO_TXTIME = 0x3d
 SO_TYPE = 0x1008
 SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 3e4083085..78c6c751b 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -1,11 +1,11 @@
-// mkerrors.sh -Wall -Werror -static -I/tmp/include
+// mkerrors.sh -Wall -Werror -static -I/tmp/mipsle/include
 // Code generated by the command above; see README.md. DO NOT EDIT.

 //go:build mipsle && linux
 // +build mipsle,linux

 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/mipsle/include _const.go

 package unix

@@ -326,6 +326,7 @@ const (
 SO_RCVBUF = 0x1002
 SO_RCVBUFFORCE = 0x21
 SO_RCVLOWAT = 0x1004
+ SO_RCVMARK = 0x4b
 SO_RCVTIMEO = 0x1006
 SO_RCVTIMEO_NEW = 0x42
 SO_RCVTIMEO_OLD = 0x1006
@@ -351,6 +352,7 @@ const (
 SO_TIMESTAMPNS_NEW = 0x40
 SO_TIMESTAMPNS_OLD = 0x23
 SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
 SO_TXTIME = 0x3d
 SO_TYPE = 0x1008
 SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index 1151a7dfa..1c0d31f0b 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -1,11 +1,11 @@
-// mkerrors.sh -Wall -Werror -static -I/tmp/include
+// mkerrors.sh -Wall -Werror -static -I/tmp/ppc/include
 // Code generated by the command above; see README.md. DO NOT EDIT.

 //go:build ppc && linux
 // +build ppc,linux

 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc/include _const.go

 package unix

@@ -381,6 +381,7 @@ const (
 SO_RCVBUF = 0x8
 SO_RCVBUFFORCE = 0x21
 SO_RCVLOWAT = 0x10
+ SO_RCVMARK = 0x4b
 SO_RCVTIMEO = 0x12
 SO_RCVTIMEO_NEW = 0x42
 SO_RCVTIMEO_OLD = 0x12
@@ -405,6 +406,7 @@ const (
 SO_TIMESTAMPNS_NEW = 0x40
 SO_TIMESTAMPNS_OLD = 0x23
 SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
 SO_TXTIME = 0x3d
 SO_TYPE = 0x3
 SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index ed17f249e..959dd9bb8 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -1,11 +1,11 @@
-// mkerrors.sh -Wall -Werror -static -I/tmp/include
+// mkerrors.sh -Wall -Werror -static -I/tmp/ppc64/include
 // Code generated by the command above; see README.md. DO NOT EDIT.

 //go:build ppc64 && linux
 // +build ppc64,linux

 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64/include _const.go

 package unix

@@ -385,6 +385,7 @@ const (
 SO_RCVBUF = 0x8
 SO_RCVBUFFORCE = 0x21
 SO_RCVLOWAT = 0x10
+ SO_RCVMARK = 0x4b
 SO_RCVTIMEO = 0x12
 SO_RCVTIMEO_NEW = 0x42
 SO_RCVTIMEO_OLD = 0x12
@@ -409,6 +410,7 @@ const (
 SO_TIMESTAMPNS_NEW = 0x40
 SO_TIMESTAMPNS_OLD = 0x23
 SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
 SO_TXTIME = 0x3d
 SO_TYPE = 0x3
 SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index d84a37c1a..5a873cdbc 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -1,11 +1,11 @@
-// mkerrors.sh -Wall -Werror -static -I/tmp/include
+// mkerrors.sh -Wall -Werror -static -I/tmp/ppc64le/include
 // Code generated by the command above; see README.md. DO NOT EDIT.

 //go:build ppc64le && linux
 // +build ppc64le,linux

 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64le/include _const.go

 package unix

@@ -385,6 +385,7 @@ const (
 SO_RCVBUF = 0x8
 SO_RCVBUFFORCE = 0x21
 SO_RCVLOWAT = 0x10
+ SO_RCVMARK = 0x4b
 SO_RCVTIMEO = 0x12
 SO_RCVTIMEO_NEW = 0x42
 SO_RCVTIMEO_OLD = 0x12
@@ -409,6 +410,7 @@ const (
 SO_TIMESTAMPNS_NEW = 0x40
 SO_TIMESTAMPNS_OLD = 0x23
 SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
 SO_TXTIME = 0x3d
 SO_TYPE = 0x3
 SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index 5cafba83f..e336d141e 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -1,11 +1,11 @@
-// mkerrors.sh -Wall -Werror -static -I/tmp/include
+// mkerrors.sh -Wall -Werror -static -I/tmp/riscv64/include
 // Code generated by the command above; see README.md. DO NOT EDIT.

 //go:build riscv64 && linux
 // +build riscv64,linux

 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/riscv64/include _const.go

 package unix

@@ -314,6 +314,7 @@ const (
 SO_RCVBUF = 0x8
 SO_RCVBUFFORCE = 0x21
 SO_RCVLOWAT = 0x12
+ SO_RCVMARK = 0x4b
 SO_RCVTIMEO = 0x14
 SO_RCVTIMEO_NEW = 0x42
 SO_RCVTIMEO_OLD = 0x14
@@ -338,6 +339,7 @@ const (
 SO_TIMESTAMPNS_NEW = 0x40
 SO_TIMESTAMPNS_OLD = 0x23
 SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
 SO_TXTIME = 0x3d
 SO_TYPE = 0x3
 SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 6d122da41..390c01d92 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -1,11 +1,11 @@
-// mkerrors.sh -Wall -Werror -static -I/tmp/include -fsigned-char
+// mkerrors.sh -Wall -Werror -static -I/tmp/s390x/include -fsigned-char
 // Code generated by the command above; see README.md. DO NOT EDIT.

 //go:build s390x && linux
 // +build s390x,linux

 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char _const.go

 package unix

@@ -389,6 +389,7 @@ const (
 SO_RCVBUF = 0x8
 SO_RCVBUFFORCE = 0x21
 SO_RCVLOWAT = 0x12
+ SO_RCVMARK = 0x4b
 SO_RCVTIMEO = 0x14
 SO_RCVTIMEO_NEW = 0x42
 SO_RCVTIMEO_OLD = 0x14
@@ -413,6 +414,7 @@ const (
 SO_TIMESTAMPNS_NEW = 0x40
 SO_TIMESTAMPNS_OLD = 0x23
 SO_TIMESTAMP_NEW = 0x3f
+ SO_TXREHASH = 0x4a
 SO_TXTIME = 0x3d
 SO_TYPE = 0x3
 SO_WIFI_STATUS = 0x29
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index 6bd19e51d..98a6e5f11 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -1,11 +1,11 @@
-// mkerrors.sh -Wall -Werror -static -I/tmp/include
+// mkerrors.sh -Wall -Werror -static -I/tmp/sparc64/include
 // Code generated by the command above; see README.md. DO NOT EDIT.

 //go:build sparc64 && linux
 // +build sparc64,linux

 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/sparc64/include _const.go

 package unix

@@ -380,6 +380,7 @@ const (
 SO_RCVBUF = 0x1002
 SO_RCVBUFFORCE = 0x100b
 SO_RCVLOWAT = 0x800
+ SO_RCVMARK = 0x54
 SO_RCVTIMEO = 0x2000
 SO_RCVTIMEO_NEW = 0x44
 SO_RCVTIMEO_OLD = 0x2000
@@ -404,6 +405,7 @@ const (
 SO_TIMESTAMPNS_NEW = 0x42
 SO_TIMESTAMPNS_OLD = 0x21
 SO_TIMESTAMP_NEW = 0x46
+ SO_TXREHASH = 0x53
 SO_TXTIME = 0x3f
 SO_TYPE = 0x1008
 SO_WIFI_STATUS = 0x25
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s
index d6c3e25c0..f5bb40eda 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s
@@ -1,4 +1,4 @@
-// go run mkasm_darwin.go amd64
+// go run mkasm.go darwin amd64
 // Code generated by the command above; DO NOT EDIT.

 //go:build go1.13
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index 879376589..467deed76 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -1643,6 +1643,30 @@ var libc_mknod_trampoline_addr uintptr

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

+func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(fsType)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(dir)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_mount_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mount mount "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Open(path string, mode int, perm uint32) (fd int, err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
index 8da90cf0e..b41467a0e 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
@@ -1,4 +1,4 @@
-// go run mkasm_darwin.go amd64
+// go run mkasm.go darwin amd64
 // Code generated by the command above; DO NOT EDIT.

 //go:build go1.12
@@ -600,6 +600,12 @@ TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0
 GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8
 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB)

+TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0
+	JMP libc_mount(SB)
+
+GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8
+DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB)
+
 TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0
 	JMP libc_open(SB)
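[Editor's note — a minimal usage sketch, not part of this diff. The hunks above add a darwin Mount wrapper over libSystem's mount(2); assuming the signature shown in the generated code, a caller passes the filesystem type, the mount point, flags, and an optional unsafe.Pointer of filesystem-specific data. The "devfs" / "/dev" pair below is a hypothetical example and requires appropriate privileges.]

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// No filesystem-specific data, so data is nil; flags 0 means defaults.
	if err := unix.Mount("devfs", "/dev", 0, nil); err != nil {
		log.Fatal(err)
	}
}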
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s
index 357989722..0c3f76bc2 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s
@@ -1,4 +1,4 @@
-// go run mkasm_darwin.go arm64
+// go run mkasm.go darwin arm64
 // Code generated by the command above; DO NOT EDIT.

 //go:build go1.13
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
index f47eedd5a..35938d34f 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
@@ -1643,6 +1643,30 @@ var libc_mknod_trampoline_addr uintptr

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

+func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(fsType)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(dir)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_mount_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mount mount "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Open(path string, mode int, perm uint32) (fd int, err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
index 4d26f7d01..e1f9204a2 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
@@ -1,4 +1,4 @@
-// go run mkasm_darwin.go arm64
+// go run mkasm.go darwin arm64
 // Code generated by the command above; DO NOT EDIT.

 //go:build go1.12
@@ -600,6 +600,12 @@ TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0
 GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8
 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB)

+TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0
+	JMP libc_mount(SB)
+
+GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8
+DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB)
+
 TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0
 	JMP libc_open(SB)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
index e9d9997ee..039c4aa06 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
@@ -912,7 +912,7 @@ func Fpathconf(fd int, name int) (val int, err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func fstat(fd int, stat *stat_freebsd11_t) (err error) {
+func Fstat(fd int, stat *Stat_t) (err error) {
 	_, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -922,17 +922,7 @@ func fstat(fd int, stat *stat_freebsd11_t) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func fstat_freebsd12(fd int, stat *Stat_t) (err error) {
-	_, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) {
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
@@ -947,22 +937,7 @@ func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error)

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) {
+func Fstatfs(fd int, stat *Statfs_t) (err error) {
 	_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -972,16 +947,6 @@ func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) {
-	_, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Fsync(fd int) (err error) {
 	_, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
 	if e1 != 0 {
@@ -1002,7 +967,7 @@ func Ftruncate(fd int, length int64) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
+func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(buf) > 0 {
 		_p0 = unsafe.Pointer(&buf[0])
@@ -1019,23 +984,6 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) {
-	var _p0 unsafe.Pointer
-	if len(buf) > 0 {
-		_p0 = unsafe.Pointer(&buf[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0)
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Getdtablesize() (size int) {
 	r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0)
 	size = int(r0)
@@ -1257,21 +1205,6 @@ func Listen(s int, backlog int) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func lstat(path string, stat *stat_freebsd11_t) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Mkdir(path string, mode uint32) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -1317,43 +1250,13 @@ func Mkfifo(path string, mode uint32) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func mknod(path string, mode uint32, dev int) (err error) {
+func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
 		return
 	}
-	_, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func mknodat(fd int, path string, mode uint32, dev int) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), uintptr(dev>>32), 0)
+	_, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), uintptr(dev>>32), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -1753,22 +1656,7 @@ func Setuid(uid int) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func stat(path string, stat *stat_freebsd11_t) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func statfs(path string, stat *statfs_freebsd11_t) (err error) {
+func Statfs(path string, stat *Statfs_t) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
@@ -1783,21 +1671,6 @@ func statfs(path string, stat *statfs_freebsd11_t) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func statfs_freebsd12(path string, stat *Statfs_t) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Symlink(path string, link string) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
index edd373b1a..0535d3cfd 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
@@ -912,7 +912,7 @@ func Fpathconf(fd int, name int) (val int, err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func fstat(fd int, stat *stat_freebsd11_t) (err error) {
+func Fstat(fd int, stat *Stat_t) (err error) {
 	_, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -922,17 +922,7 @@ func fstat(fd int, stat *stat_freebsd11_t) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func fstat_freebsd12(fd int, stat *Stat_t) (err error) {
-	_, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) {
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
@@ -947,22 +937,7 @@ func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error)

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) {
+func Fstatfs(fd int, stat *Statfs_t) (err error) {
 	_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -972,16 +947,6 @@ func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) {
-	_, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Fsync(fd int) (err error) {
 	_, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
 	if e1 != 0 {
@@ -1002,7 +967,7 @@ func Ftruncate(fd int, length int64) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
+func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(buf) > 0 {
 		_p0 = unsafe.Pointer(&buf[0])
@@ -1019,23 +984,6 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) {
-	var _p0 unsafe.Pointer
-	if len(buf) > 0 {
-		_p0 = unsafe.Pointer(&buf[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0)
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Getdtablesize() (size int) {
 	r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0)
 	size = int(r0)
@@ -1257,21 +1205,6 @@ func Listen(s int, backlog int) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func lstat(path string, stat *stat_freebsd11_t) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Mkdir(path string, mode uint32) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -1317,22 +1250,7 @@ func Mkfifo(path string, mode uint32) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func mknod(path string, mode uint32, dev int) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func mknodat(fd int, path string, mode uint32, dev int) (err error) {
+func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
@@ -1347,21 +1265,6 @@ func mknodat(fd int, path string, mode uint32, dev int) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
 	_, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
 	if e1 != 0 {
@@ -1753,22 +1656,7 @@ func Setuid(uid int) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func stat(path string, stat *stat_freebsd11_t) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func statfs(path string, stat *statfs_freebsd11_t) (err error) {
+func Statfs(path string, stat *Statfs_t) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
@@ -1783,21 +1671,6 @@ func statfs(path string, stat *statfs_freebsd11_t) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func statfs_freebsd12(path string, stat *Statfs_t) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Symlink(path string, link string) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
index 82e9764b2..1018b5221 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
@@ -351,22 +351,6 @@ func Munlockall() (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
-	var _p0 unsafe.Pointer
-	if len(mib) > 0 {
-		_p0 = unsafe.Pointer(&mib[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func pipe2(p *[2]_C_int, flags int) (err error) {
 	_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
 	if e1 != 0 {
@@ -404,6 +388,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

+func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
+	var _p0 unsafe.Pointer
+	if len(mib) > 0 {
+		_p0 = unsafe.Pointer(&mib[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ptrace(request int, pid int, addr uintptr, data int) (err error) {
 	_, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
 	if e1 != 0 {
@@ -912,7 +912,7 @@ func Fpathconf(fd int, name int) (val int, err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func fstat(fd int, stat *stat_freebsd11_t) (err error) {
+func Fstat(fd int, stat *Stat_t) (err error) {
 	_, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -922,17 +922,7 @@ func fstat(fd int, stat *stat_freebsd11_t) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func fstat_freebsd12(fd int, stat *Stat_t) (err error) {
-	_, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) {
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
@@ -947,22 +937,7 @@ func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error)

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) {
+func Fstatfs(fd int, stat *Statfs_t) (err error) {
 	_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -972,16 +947,6 @@ func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) {
-	_, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Fsync(fd int) (err error) {
 	_, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
 	if e1 != 0 {
@@ -1002,7 +967,7 @@ func Ftruncate(fd int, length int64) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
+func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(buf) > 0 {
 		_p0 = unsafe.Pointer(&buf[0])
@@ -1019,23 +984,6 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) {
-	var _p0 unsafe.Pointer
-	if len(buf) > 0 {
-		_p0 = unsafe.Pointer(&buf[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0)
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Getdtablesize() (size int) {
 	r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0)
 	size = int(r0)
@@ -1257,21 +1205,6 @@ func Listen(s int, backlog int) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func lstat(path string, stat *stat_freebsd11_t) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Mkdir(path string, mode uint32) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -1317,43 +1250,13 @@ func Mkfifo(path string, mode uint32) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func mknod(path string, mode uint32, dev int) (err error) {
+func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
 		return
 	}
-	_, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func mknodat(fd int, path string, mode uint32, dev int) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
+	_, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, uintptr(dev), uintptr(dev>>32))
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -1753,22 +1656,7 @@ func Setuid(uid int) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func stat(path string, stat *stat_freebsd11_t) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func statfs(path string, stat *statfs_freebsd11_t) (err error) {
+func Statfs(path string, stat *Statfs_t) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
@@ -1783,21 +1671,6 @@ func statfs(path string, stat *statfs_freebsd11_t) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func statfs_freebsd12(path string, stat *Statfs_t) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Symlink(path string, link string) (err error) {
 	var _p0 *byte
_p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index a6479acd1..3802f4b37 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -912,7 +912,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *stat_freebsd11_t) (err error) { +func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -922,17 +922,7 @@ func fstat(fd int, stat *stat_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat_freebsd12(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -947,22 +937,7 @@ func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { +func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -972,16 +947,6 @@ func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1002,7 +967,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) @@ -1019,23 +984,6 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - 
err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getdtablesize() (size int) { r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) @@ -1257,21 +1205,6 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func lstat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1317,22 +1250,7 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat(fd int, path string, mode uint32, dev int) (err error) { +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1347,21 +1265,6 @@ func mknodat(fd int, path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1753,22 +1656,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func stat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func statfs(path string, stat *statfs_freebsd11_t) (err error) { +func Statfs(path string, stat *Statfs_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1783,21 +1671,6 @@ func statfs(path string, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func statfs_freebsd12(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = 
BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go new file mode 100644 index 000000000..8a2db7da9 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -0,0 +1,1889 @@ +// go run mksyscall.go -tags freebsd,riscv64 syscall_bsd.go syscall_freebsd.go syscall_freebsd_riscv64.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build freebsd && riscv64 +// +build freebsd,riscv64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := 
RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := 
Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CapEnter() (err error) { + _, _, e1 := Syscall(SYS_CAP_ENTER, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func capRightsGet(version int, fd int, rightsp *CapRights) (err error) { + _, _, e1 := Syscall(SYS___CAP_RIGHTS_GET, uintptr(version), uintptr(fd), uintptr(unsafe.Pointer(rightsp))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func capRightsLimit(fd int, rightsp *CapRights) (err error) { + _, _, e1 := Syscall(SYS_CAP_RIGHTS_LIMIT, uintptr(fd), uintptr(unsafe.Pointer(rightsp)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + 
return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FD, uintptr(fd), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFile(file 
string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), 
uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_POSIX_FADVISE, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := 
Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), 
uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, 
uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(fdat int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(fdat), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} 
+ +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret 
uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 78d74520b..bc4a27531 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -828,6 +828,49 @@ func Fsync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) { + r0, _, e1 := Syscall(SYS_FSMOUNT, uintptr(fd), uintptr(flags), uintptr(mountAttrs)) + fsfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsopen(fsName string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsName) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_FSOPEN, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fspick(dirfd int, pathName string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathName) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_FSPICK, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -1205,6 +1248,26 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func 
MoveMount(fromDirfd int, fromPathName string, toDirfd int, toPathName string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fromPathName) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(toPathName) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOVE_MOUNT, uintptr(fromDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(toDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1215,6 +1278,22 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func OpenTree(dfd int, fileName string, flags uint) (r int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fileName) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN_TREE, uintptr(dfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + r = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) fd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index c947a4d10..2a0c4aa6a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -215,6 +215,17 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdSecret(flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_MEMFD_SECRET, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pause() (err error) { _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index dd15284d8..9f8c24e43 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -180,6 +180,17 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdSecret(flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_MEMFD_SECRET, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go new file mode 100644 index 000000000..523f2ba03 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go @@ -0,0 +1,527 @@ +// go run mksyscall.go -tags linux,loong64 syscall_linux.go syscall_linux_loong64.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build linux && loong64 +// +build linux,loong64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_PWAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = 
int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index a1a9bcbbd..1239cc2de 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -180,6 +180,17 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdSecret(flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_MEMFD_SECRET, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index d12f4fbfe..fdf53f8da 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -66,6 +66,7 @@ import ( //go:cgo_import_dynamic libc_getpriority getpriority "libc.so" //go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" //go:cgo_import_dynamic libc_getrusage getrusage "libc.so" +//go:cgo_import_dynamic libc_getsid getsid "libc.so" //go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" //go:cgo_import_dynamic libc_getuid getuid "libc.so" //go:cgo_import_dynamic libc_kill kill "libc.so" @@ -202,6 +203,7 @@ import ( //go:linkname procGetpriority libc_getpriority //go:linkname procGetrlimit libc_getrlimit //go:linkname procGetrusage libc_getrusage +//go:linkname procGetsid libc_getsid //go:linkname procGettimeofday libc_gettimeofday //go:linkname procGetuid libc_getuid //go:linkname procKill libc_kill @@ -339,6 +341,7 @@ var ( procGetpriority, procGetrlimit, procGetrusage, + procGetsid, procGettimeofday, procGetuid, procKill, @@ -1044,6 +1047,17 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetsid)), 1, uintptr(pid), 0, 0, 0, 0, 0) + sid = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 59d5dfc20..4e0d96107 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && freebsd @@ -19,10 +19,9 @@ const ( SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_BREAK = 17 // { caddr_t break(char *nsize); } SYS_GETPID = 20 // { pid_t getpid(void); } SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } @@ -43,7 +42,6 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } @@ -58,15 +56,14 @@ const ( SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_UMASK = 60 // { int umask(int newmask); } SYS_CHROOT = 61 // { int chroot(char *path); } SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); } SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } @@ -124,14 +121,10 @@ const ( SYS_SETGID = 181 // { int setgid(gid_t gid); } SYS_SETEGID = 182 // { int setegid(gid_t egid); } SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -143,12 +136,12 @@ const ( SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } - 
SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } - SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } @@ -157,50 +150,44 @@ const ( SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); } SYS_ISSETUGID = 253 // { int issetugid(void); } SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } - SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 
301 // { int modstat(int modid, struct module_stat *stat); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } SYS_MODFNEXT = 302 // { int modfnext(int modid); } SYS_MODFIND = 303 // { int modfind(const char *name); } SYS_KLDLOAD = 304 // { int kldload(const char *file); } SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } SYS_KLDFIND = 306 // { int kldfind(const char *file); } SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); } SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } - SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); } SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } @@ -226,14 +213,13 @@ const ( SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int 
nevents, const struct timespec *timeout); } SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } @@ -251,10 +237,6 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } - SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } @@ -267,14 +249,14 @@ const ( SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } - SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); } + SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); } SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); } SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } SYS_SWAPOFF = 424 // { int swapoff(const char *name); } SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } @@ -288,10 +270,10 @@ const ( SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, 
void *data, size_t nbytes); } - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } - SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); } + SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); } SYS_THR_WAKE = 443 // { int thr_wake(long id); } SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } @@ -300,17 +282,17 @@ const ( SYS_SETAUID = 448 // { int setauid(uid_t *auid); } SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } SYS_AUDITCTL = 453 // { int auditctl(char *path); } SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } - SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } - SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } - SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);} - SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } @@ -319,7 +301,7 @@ const ( SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo 
*sinfo, int flags); } - SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } @@ -338,14 +320,12 @@ const ( SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); } SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } @@ -391,7 +371,24 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } SYS_FDATASYNC = 550 // { int fdatasync(int fd); } + SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); } + SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); } + SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, 
cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); } + SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); } + SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); } + SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); } + SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); } + SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); } + SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); } + SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index 342d471d2..01636b838 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd @@ -19,10 +19,9 @@ const ( SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_BREAK = 17 // { caddr_t break(char *nsize); } SYS_GETPID = 20 // { pid_t getpid(void); } SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } @@ -43,7 +42,6 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } @@ -58,15 +56,14 @@ const ( SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_UMASK = 60 // { int umask(int newmask); } SYS_CHROOT = 61 // { int chroot(char *path); } SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } SYS_MINCORE = 78 // { int 
mincore(const void *addr, size_t len, char *vec); } SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } @@ -124,14 +121,10 @@ const ( SYS_SETGID = 181 // { int setgid(gid_t gid); } SYS_SETEGID = 182 // { int setegid(gid_t egid); } SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -143,12 +136,12 @@ const ( SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } - SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } - SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } @@ -157,50 +150,44 @@ const ( SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t 
len, int inherit); }
SYS_RFORK = 251 // { int rfork(int flags); }
- SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); }
SYS_ISSETUGID = 253 // { int issetugid(void); }
SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); }
SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); }
SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); }
- SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); }
- SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); }
+ SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); }
SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); }
SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); }
- SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); }
- SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); }
- SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); }
SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); }
SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); }
SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); }
- SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); }
SYS_MODNEXT = 300 // { int modnext(int modid); }
- SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat *stat); }
+ SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); }
SYS_MODFNEXT = 302 // { int modfnext(int modid); }
SYS_MODFIND = 303 // { int modfind(const char *name); }
SYS_KLDLOAD = 304 // { int kldload(const char *file); }
SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); }
SYS_KLDFIND = 306 // { int kldfind(const char *file); }
SYS_KLDNEXT = 307 // { int kldnext(int fileid); }
- SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); }
+ SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); }
SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); }
SYS_GETSID = 310 // { int getsid(pid_t pid); }
SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); }
SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); }
SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); }
- SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); }
+ SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); }
SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); }
SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); }
SYS_YIELD = 321 // { int yield(void); }
SYS_MLOCKALL = 324 // { int mlockall(int how); }
SYS_MUNLOCKALL = 325 // { int munlockall(void); }
- SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); }
+ SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); }
SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); }
SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); }
SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); }
@@ -226,14 +213,13 @@ const (
SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); }
SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); }
SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); }
- SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
- SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); }
- SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); }
+ SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); }
SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); }
SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); }
SYS_KQUEUE = 362 // { int kqueue(void); }
- SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); }
SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); }
@@ -251,10 +237,6 @@ const (
SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); }
SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); }
SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); }
- SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); }
- SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); }
- SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); }
- SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); }
SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); }
SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); }
SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); }
@@ -267,14 +249,14 @@ const (
SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); }
SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); }
SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); }
- SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
- SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
- SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); }
+ SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); }
SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); }
SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); }
- SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); }
+ SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); }
SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); }
- SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); }
+ SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); }
SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); }
SYS_SWAPOFF = 424 // { int swapoff(const char *name); }
SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); }
@@ -288,10 +270,10 @@ const (
SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); }
SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); }
SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); }
- SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); }
- SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); }
+ SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); }
+ SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); }
SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); }
- SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); }
+ SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); }
SYS_THR_WAKE = 443 // { int thr_wake(long id); }
SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); }
SYS_AUDIT = 445 // { int audit(const void *record, u_int length); }
@@ -300,17 +282,17 @@ const (
SYS_SETAUID = 448 // { int setauid(uid_t *auid); }
SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); }
SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); }
- SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); }
- SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); }
+ SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); }
+ SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); }
SYS_AUDITCTL = 453 // { int auditctl(char *path); }
SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); }
SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); }
SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); }
SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); }
- SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); }
- SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); }
- SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);}
- SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); }
+ SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); }
+ SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); }
+ SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); }
+ SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); }
SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); }
SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); }
SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); }
@@ -319,7 +301,7 @@ const (
SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); }
SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); }
SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); }
- SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); }
+ SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); }
SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); }
SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); }
SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); }
@@ -338,14 +320,12 @@ const (
SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); }
SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); }
SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); }
- SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); }
SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); }
SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); }
SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); }
SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); }
- SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); }
SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); }
- SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); }
+ SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); }
SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); }
SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); }
SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); }
@@ -391,7 +371,24 @@ const (
SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); }
SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); }
SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); }
- SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); }
- SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); }
SYS_FDATASYNC = 550 // { int fdatasync(int fd); }
+ SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); }
+ SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); }
+ SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); }
+ SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); }
+ SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); }
+ SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); }
+ SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); }
+ SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); }
+ SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); }
+ SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); }
+ SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); }
+ SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); }
+ SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); }
+ SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); }
+ SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); }
+ SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); }
+ SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); }
+ SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); }
+ SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); }
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go
index e2e3d72c5..ad99bc106 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go
@@ -1,4 +1,4 @@
-// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master
+// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12
// Code generated by the command above; see README.md. DO NOT EDIT.

//go:build arm && freebsd
@@ -19,10 +19,9 @@ const (
SYS_UNLINK = 10 // { int unlink(char *path); }
SYS_CHDIR = 12 // { int chdir(char *path); }
SYS_FCHDIR = 13 // { int fchdir(int fd); }
- SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); }
SYS_CHMOD = 15 // { int chmod(char *path, int mode); }
SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); }
- SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int
+ SYS_BREAK = 17 // { caddr_t break(char *nsize); }
SYS_GETPID = 20 // { pid_t getpid(void); }
SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); }
SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); }
@@ -43,7 +42,6 @@ const (
SYS_KILL = 37 // { int kill(int pid, int signum); }
SYS_GETPPID = 39 // { pid_t getppid(void); }
SYS_DUP = 41 // { int dup(u_int fd); }
- SYS_PIPE = 42 // { int pipe(void); }
SYS_GETEGID = 43 // { gid_t getegid(void); }
SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); }
SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); }
@@ -58,15 +56,14 @@ const (
SYS_SYMLINK = 57 // { int symlink(char *path, char *link); }
SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); }
SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); }
- SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int
+ SYS_UMASK = 60 // { int umask(int newmask); }
SYS_CHROOT = 61 // { int chroot(char *path); }
SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); }
SYS_VFORK = 66 // { int vfork(void); }
SYS_SBRK = 69 // { int sbrk(int incr); }
SYS_SSTK = 70 // { int sstk(int incr); }
- SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int
SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); }
- SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); }
+ SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); }
SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); }
SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); }
SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); }
@@ -124,14 +121,10 @@ const (
SYS_SETGID = 181 // { int setgid(gid_t gid); }
SYS_SETEGID = 182 // { int setegid(gid_t egid); }
SYS_SETEUID = 183 // { int seteuid(uid_t euid); }
- SYS_STAT = 188 // { int stat(char *path, struct stat *ub); }
- SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); }
- SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); }
SYS_PATHCONF = 191 // { int pathconf(char *path, int name); }
SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); }
SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int
SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int
- SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); }
SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int
SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); }
SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); }
@@ -143,12 +136,12 @@ const (
SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); }
SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); }
SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); }
- SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); }
+ SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); }
SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); }
SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); }
SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); }
SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); }
- SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); }
+ SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); }
SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); }
SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); }
SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); }
@@ -157,50 +150,44 @@ const (
SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); }
SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); }
SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); }
- SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); }
- SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); }
+ SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); }
+ SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); }
SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); }
- SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); }
+ SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); }
SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); }
SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); }
SYS_RFORK = 251 // { int rfork(int flags); }
- SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); }
SYS_ISSETUGID = 253 // { int issetugid(void); }
SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); }
SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); }
SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); }
- SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); }
- SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); }
+ SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); }
SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); }
SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); }
- SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); }
- SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); }
- SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); }
SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); }
SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); }
SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); }
- SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); }
SYS_MODNEXT = 300 // { int modnext(int modid); }
- SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat *stat); }
+ SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); }
SYS_MODFNEXT = 302 // { int modfnext(int modid); }
SYS_MODFIND = 303 // { int modfind(const char *name); }
SYS_KLDLOAD = 304 // { int kldload(const char *file); }
SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); }
SYS_KLDFIND = 306 // { int kldfind(const char *file); }
SYS_KLDNEXT = 307 // { int kldnext(int fileid); }
- SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); }
+ SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); }
SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); }
SYS_GETSID = 310 // { int getsid(pid_t pid); }
SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); }
SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); }
SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); }
- SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); }
+ SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); }
SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); }
SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); }
SYS_YIELD = 321 // { int yield(void); }
SYS_MLOCKALL = 324 // { int mlockall(int how); }
SYS_MUNLOCKALL = 325 // { int munlockall(void); }
- SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); }
+ SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); }
SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); }
SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); }
SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); }
@@ -226,14 +213,13 @@ const (
SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); }
SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); }
SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); }
- SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
- SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); }
- SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); }
+ SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); }
SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); }
SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); }
SYS_KQUEUE = 362 // { int kqueue(void); }
- SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); }
SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); }
@@ -251,10 +237,6 @@ const (
SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); }
SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); }
SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); }
- SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); }
- SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); }
- SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); }
- SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); }
SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); }
SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); }
SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); }
@@ -267,14 +249,14 @@ const (
SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); }
SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); }
SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); }
- SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
- SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
- SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); }
+ SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); }
SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); }
SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); }
- SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); }
+ SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); }
SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); }
- SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); }
+ SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); }
SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); }
SYS_SWAPOFF = 424 // { int swapoff(const char *name); }
SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); }
@@ -288,10 +270,10 @@ const (
SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); }
SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); }
SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); }
- SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); }
- SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); }
+ SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); }
+ SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); }
SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); }
- SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); }
+ SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); }
SYS_THR_WAKE = 443 // { int thr_wake(long id); }
SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); }
SYS_AUDIT = 445 // { int audit(const void *record, u_int length); }
@@ -300,17 +282,17 @@ const (
SYS_SETAUID = 448 // { int setauid(uid_t *auid); }
SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); }
SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); }
- SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); }
- SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); }
+ SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); }
+ SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); }
SYS_AUDITCTL = 453 // { int auditctl(char *path); }
SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); }
SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); }
SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); }
SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); }
- SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); }
- SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); }
- SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);}
- SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); }
+ SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); }
+ SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); }
+ SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); }
+ SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); }
SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); }
SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); }
SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); }
@@ -319,7 +301,7 @@ const (
SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); }
SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); }
SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); }
- SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); }
+ SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); }
SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); }
SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); }
SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); }
@@ -338,14 +320,12 @@ const (
SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); }
SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); }
SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); }
- SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); }
SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); }
SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); }
SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); }
SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); }
- SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); }
SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); }
- SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); }
+ SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); }
SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); }
SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); }
SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); }
@@ -391,7 +371,24 @@ const (
SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); }
SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); }
SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); }
- SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); }
- SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); }
SYS_FDATASYNC = 550 // { int fdatasync(int fd); }
+ SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); }
+ SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); }
+ SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); }
+ SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); }
+ SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); }
+ SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); }
+ SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); }
+ SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); }
+ SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); }
+ SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); }
+ SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); }
+ SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); }
+ SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); }
+ SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); }
+ SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); }
+ SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); }
+ SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); }
+ SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); }
+ SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); }
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go
index 61ad5ca3c..89dcc4274 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go
@@ -1,4 +1,4 @@
-// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master
+// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12
// Code generated by the command above; see README.md. DO NOT EDIT.

//go:build arm64 && freebsd
@@ -19,10 +19,9 @@ const (
SYS_UNLINK = 10 // { int unlink(char *path); }
SYS_CHDIR = 12 // { int chdir(char *path); }
SYS_FCHDIR = 13 // { int fchdir(int fd); }
- SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); }
SYS_CHMOD = 15 // { int chmod(char *path, int mode); }
SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); }
- SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int
+ SYS_BREAK = 17 // { caddr_t break(char *nsize); }
SYS_GETPID = 20 // { pid_t getpid(void); }
SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); }
SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); }
@@ -43,7 +42,6 @@ const (
SYS_KILL = 37 // { int kill(int pid, int signum); }
SYS_GETPPID = 39 // { pid_t getppid(void); }
SYS_DUP = 41 // { int dup(u_int fd); }
- SYS_PIPE = 42 // { int pipe(void); }
SYS_GETEGID = 43 // { gid_t getegid(void); }
SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); }
SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); }
@@ -58,15 +56,14 @@ const (
SYS_SYMLINK = 57 // { int symlink(char *path, char *link); }
SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); }
SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); }
- SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int
+ SYS_UMASK = 60 // { int umask(int newmask); }
SYS_CHROOT = 61 // { int chroot(char *path); }
SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); }
SYS_VFORK = 66 // { int vfork(void); }
SYS_SBRK = 69 // { int sbrk(int incr); }
SYS_SSTK = 70 // { int sstk(int incr); }
- SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int
SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); }
- SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); }
+ SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); }
SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); }
SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); }
SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); }
@@ -124,14 +121,10 @@ const (
SYS_SETGID = 181 // { int setgid(gid_t gid); }
SYS_SETEGID = 182 // { int setegid(gid_t egid); }
SYS_SETEUID = 183 // { int seteuid(uid_t euid); }
- SYS_STAT = 188 // { int stat(char *path, struct stat *ub); }
- SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); }
- SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); }
SYS_PATHCONF = 191 // { int pathconf(char *path, int name); }
SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); }
SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int
SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int
- SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); }
SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int
SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); }
SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); }
@@ -143,12 +136,12 @@ const (
SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); }
SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); }
SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); }
- SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); }
+ SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); }
SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); }
SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); }
SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); }
SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); }
- SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); }
+ SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); }
SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); }
SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); }
SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); }
@@ -157,50 +150,44 @@ const (
SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); }
SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); }
SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); }
- SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); }
- SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); }
+ SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); }
+ SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); }
SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); }
- SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); }
+ SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); }
SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); }
SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); }
SYS_RFORK = 251 // { int rfork(int flags); }
- SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); }
SYS_ISSETUGID = 253 // { int issetugid(void); }
SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); }
SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); }
SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); }
- SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); }
- SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); }
+ SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); }
SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); }
SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); }
- SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); }
- SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); }
- SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); }
SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); }
SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); }
SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); }
- SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); }
SYS_MODNEXT = 300 // { int modnext(int modid); }
- SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat *stat); }
+ SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); }
SYS_MODFNEXT = 302 // { int modfnext(int modid); }
SYS_MODFIND = 303 // { int modfind(const char *name); }
SYS_KLDLOAD = 304 // { int kldload(const char *file); }
SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); }
SYS_KLDFIND = 306 // { int kldfind(const char *file); }
SYS_KLDNEXT = 307 // { int kldnext(int fileid); }
- SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); }
+ SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); }
SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); }
SYS_GETSID = 310 // { int getsid(pid_t pid); }
SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); }
SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); }
SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); }
- SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); }
+ SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); }
SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); }
SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); }
SYS_YIELD = 321 // { int yield(void); }
SYS_MLOCKALL = 324 // { int mlockall(int how); }
SYS_MUNLOCKALL = 325 // { int munlockall(void); }
- SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); }
+ SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); }
SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); }
SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); }
SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); }
@@ -226,14 +213,13 @@ const (
SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); }
SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); }
SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); }
- SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
- SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); }
- SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); }
+ SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); }
SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); }
SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); }
SYS_KQUEUE = 362 // { int kqueue(void); }
- SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); }
SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); }
@@ -251,10 +237,6 @@ const (
SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); }
SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); }
SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); }
- SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); }
- SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); }
- SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); }
- SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); }
SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); }
SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); }
SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); }
@@ -267,14 +249,14 @@ const (
SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); }
SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); }
SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); }
- SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
- SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
- SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); }
+ SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); }
SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); }
SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); }
- SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); }
+ SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); }
SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); }
- SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); }
+ SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); }
SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); }
SYS_SWAPOFF = 424 // { int swapoff(const char *name); }
SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); }
@@ -288,10 +270,10 @@ const (
SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); }
SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); }
SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); }
- SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); }
- SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); }
+ SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); }
+ SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); }
SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); }
- SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); }
+ SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); }
SYS_THR_WAKE = 443 // { int thr_wake(long id); }
SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); }
SYS_AUDIT = 445 // { int audit(const void *record, u_int length); }
@@ -300,17 +282,17 @@ const (
SYS_SETAUID = 448 // { int setauid(uid_t *auid); }
SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); }
SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); }
- SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); }
- SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); }
+ SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); }
+ SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); }
SYS_AUDITCTL = 453 // { int auditctl(char *path); }
SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); }
SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); }
SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); }
SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); }
- SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); }
- SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); }
- SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);}
- SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); }
+ SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); }
+ SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); }
+ SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); }
+ SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); }
SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); }
SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); }
SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); }
@@ -319,7 +301,7 @@ const (
SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); }
SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); }
SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); }
- SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); }
+ SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); }
SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); }
SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); }
SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); }
@@ -338,14 +320,12 @@ const (
SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); }
SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); }
SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); }
- SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); }
SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); }
SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); }
SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); }
SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); }
- SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); }
SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); }
- SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); }
+ SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); }
SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); }
SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); }
SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); }
@@ -391,7 +371,24 @@ const (
SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); }
SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); }
SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); }
- SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); }
- SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); }
SYS_FDATASYNC = 550 // { int fdatasync(int fd); }
+ SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); }
+ SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); }
+ SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); }
+ SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); }
+ SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); }
+ SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); }
+ SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); }
+ SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); }
+ SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); }
+ SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); }
+ SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); }
+ SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); }
+ SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); }
+ SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); }
+ SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); }
+ SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); }
+ SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); }
+ SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); }
+ SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); }
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go
new file mode 100644
index 000000000..ee37aaa0c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go
@@ -0,0 +1,394 @@
+// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+//go:build riscv64 && freebsd
+// +build riscv64,freebsd
+
+package unix
+
+const (
+ // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int
+ SYS_EXIT = 1 // { void sys_exit(int rval); } exit sys_exit_args void
+ SYS_FORK = 2 // { int fork(void); }
+ SYS_READ = 3 // { ssize_t read(int fd, void *buf, size_t nbyte); }
+ SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, size_t nbyte); }
+ SYS_OPEN = 5 // { int open(char *path, int flags, int mode); }
+ SYS_CLOSE = 6 // { int close(int fd); }
+ SYS_WAIT4 = 7 // { int wait4(int pid, int *status, int options, struct rusage *rusage); }
+ SYS_LINK = 9 // { int link(char *path, char *link); }
+ SYS_UNLINK = 10 // { int unlink(char *path); }
+ SYS_CHDIR = 12 // { int chdir(char *path); }
+ SYS_FCHDIR = 13 // { int fchdir(int fd); }
+ SYS_CHMOD = 15 // { int chmod(char *path, int mode); }
+ SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); }
+ SYS_BREAK = 17 // { caddr_t break(char *nsize); }
+ SYS_GETPID = 20 // { pid_t getpid(void); }
+ SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); }
+ SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); }
+ SYS_SETUID = 23 // { int setuid(uid_t uid); }
+ SYS_GETUID = 24 // { uid_t getuid(void); }
+ SYS_GETEUID = 25 // { uid_t geteuid(void); }
+ SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, caddr_t addr, int data); }
+ SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, int flags); }
+ SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, int flags); }
+ SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, size_t len, int flags, struct sockaddr * __restrict from, __socklen_t * __restrict fromlenaddr); }
+ SYS_ACCEPT = 30 // { int accept(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen); }
+ SYS_GETPEERNAME = 31 // { int getpeername(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); }
+ SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); }
+ SYS_ACCESS = 33 // { int access(char *path, int amode); }
+ SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); }
+ SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); }
+ SYS_SYNC = 36 // { int sync(void); }
+ SYS_KILL = 37 // { int kill(int pid, int signum); }
+ SYS_GETPPID = 39 // { pid_t getppid(void); }
+ SYS_DUP = 41 // { int dup(u_int fd); }
+ SYS_GETEGID = 43 // { gid_t getegid(void); }
+ SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); }
+ SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); }
+ SYS_GETGID = 47 // { gid_t getgid(void); }
+ SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int namelen); }
+ SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); }
+ SYS_ACCT = 51 // { int acct(char *path); }
+ SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, stack_t *oss); }
+ SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, caddr_t data); }
+ SYS_REBOOT = 55 // { int reboot(int opt); }
+ SYS_REVOKE = 56 // { int revoke(char *path); }
+ SYS_SYMLINK = 57 // { int symlink(char *path, char *link); }
+ SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); }
+ SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); }
+ SYS_UMASK = 60 // { int umask(int newmask); }
+ SYS_CHROOT = 61 // { int chroot(char *path); }
+ SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); }
+ SYS_VFORK = 66 // { int vfork(void); }
+ SYS_SBRK = 69 // { int sbrk(int incr); }
+ SYS_SSTK = 70 // { int sstk(int incr); }
+ SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); }
+ SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); }
+ SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); }
+ SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); }
+ SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); }
+ SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, gid_t *gidset); }
+ SYS_GETPGRP = 81 // { int getpgrp(void); }
+ SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); }
+ SYS_SETITIMER = 83 // { int setitimer(u_int which, struct itimerval *itv, struct itimerval *oitv); }
+ SYS_SWAPON = 85 // { int swapon(char *name); }
+ SYS_GETITIMER = 86 // { int getitimer(u_int which, struct itimerval *itv); }
+ SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); }
+ SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); }
+ SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); }
+ SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); }
+ SYS_FSYNC = 95 // { int fsync(int fd); }
+ SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, int prio); }
+ SYS_SOCKET = 97 // { int socket(int domain, int type, int protocol); }
+ SYS_CONNECT = 98 // { int connect(int s, caddr_t name, int namelen); }
+ SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); }
+ SYS_BIND = 104 // { int bind(int s, caddr_t name, int namelen); }
+ SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, caddr_t val, int valsize); }
+ SYS_LISTEN = 106 // { int listen(int s, int backlog); }
+ SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, struct timezone *tzp); }
+ SYS_GETRUSAGE = 117 // { int getrusage(int who, struct rusage *rusage); }
+ SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, caddr_t val, int *avalsize); }
+ SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, u_int iovcnt); }
+ SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, u_int iovcnt); }
+ SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, struct timezone *tzp); }
+ SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); }
+ SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); }
+ SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); }
+ SYS_SETREGID = 127 // { int setregid(int rgid, int egid); }
+ SYS_RENAME = 128 // { int rename(char *from, char *to); }
+ SYS_FLOCK = 131 // { int flock(int fd, int how); }
+ SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); }
+ SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen); }
+ SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); }
+ SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, int protocol, int *rsv); }
+ SYS_MKDIR = 136 // { int mkdir(char *path, int mode); }
+ SYS_RMDIR = 137 // { int rmdir(char *path); }
+ SYS_UTIMES = 138 // { int utimes(char *path, struct timeval *tptr); }
+ SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, struct timeval *olddelta); }
+ SYS_SETSID = 147 // { int setsid(void); }
+ SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, caddr_t arg); }
+ SYS_NLM_SYSCALL = 154 // { int nlm_syscall(int debug_level, int grace_period, int addr_count, char **addrs); }
+ SYS_NFSSVC = 155 // { int nfssvc(int flag, caddr_t argp); }
+ SYS_LGETFH = 160 // { int lgetfh(char *fname, struct fhandle *fhp); }
+ SYS_GETFH = 161 // { int getfh(char *fname, struct fhandle *fhp); }
+ SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); }
+ SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, struct rtprio *rtp); }
+ SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); }
+ SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); }
+ SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); }
+ SYS_SETFIB = 175 // { int setfib(int fibnum); }
+ SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); }
+ SYS_SETGID = 181 // { int setgid(gid_t gid); }
+ SYS_SETEGID = 182 // { int setegid(gid_t egid); }
+ SYS_SETEUID = 183 // { int seteuid(uid_t euid); }
+ SYS_PATHCONF = 191 // { int pathconf(char *path, int name); }
+ SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); }
+ SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int
+ SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int
+ SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int
+ SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); }
+ SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); }
+ SYS_UNDELETE = 205 // { int undelete(char *path); }
+ SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); }
+ SYS_GETPGID = 207 // { int getpgid(pid_t pid); }
+ SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, int timeout); }
+ SYS_SEMGET = 221 // { int semget(key_t key, int nsems, int semflg); }
+ SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); }
+ SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); }
+ SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); }
+ SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); }
+ SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); }
+ SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); }
+ SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); }
+ SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); }
+ SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); }
+ SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); }
+ SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); }
+ SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); }
+ SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); }
+ SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct itimerspec *value); }
+ SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); }
+ SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); }
+ SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); }
+ SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); }
+ SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); }
+ SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); }
+ SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which,
clockid_t *clock_id); } + SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } + SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } + SYS_RFORK = 251 // { int rfork(int flags); } + SYS_ISSETUGID = 253 // { int issetugid(void); } + SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } + SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } + SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); } + SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } + SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } + SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } + SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } + SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } + SYS_MODNEXT = 300 // { int modnext(int modid); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } + SYS_MODFNEXT = 302 // { int modfnext(int modid); } + SYS_MODFIND = 303 // { int modfind(const char *name); } + SYS_KLDLOAD = 304 // { int kldload(const char *file); } + SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } + SYS_KLDFIND = 306 // { int kldfind(const char *file); } + SYS_KLDNEXT = 307 // { int kldnext(int fileid); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); } + SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } + SYS_GETSID = 310 // { int getsid(pid_t pid); } + SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } + SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } + SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } + SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } + SYS_YIELD = 321 // { int yield(void); } + SYS_MLOCKALL = 324 // { int mlockall(int how); } + SYS_MUNLOCKALL = 325 // { int munlockall(void); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); } + SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } + SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } + SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } + SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } + SYS_SCHED_YIELD = 331 // { int sched_yield (void); } + SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } + SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } + SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, struct timespec *interval); } + SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } + SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, void *data); } + SYS_JAIL = 338 // { int jail(struct jail *jail); } + SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, const sigset_t *set, sigset_t *oset); } + SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } + SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } + SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const 
sigset_t *set, siginfo_t *info, const struct timespec *timeout); } + SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, siginfo_t *info); } + SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, acl_type_t type); } + SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, acl_type_t type); } + SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } + SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } + SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } + SYS_KQUEUE = 362 // { int kqueue(void); } + SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } + SYS___SETUGID = 374 // { int __setugid(int flag); } + SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } + SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, unsigned int iovcnt, int flags); } + SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } + SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } + SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, struct mac *mac_p); } + SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, struct mac *mac_p); } + SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, struct mac *mac_p); } + SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, struct mac *mac_p); } + SYS_KENV = 390 // { int kenv(int what, const char *name, char *value, int len); } + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, u_long flags); } + SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } + SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } + SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } + SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } + SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } + SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } + 
SYS_KSEM_TRYWAIT = 403 // { int ksem_trywait(semid_t id); } + SYS_KSEM_INIT = 404 // { int ksem_init(semid_t *idp, unsigned int value); } + SYS_KSEM_OPEN = 405 // { int ksem_open(semid_t *idp, const char *name, int oflag, mode_t mode, unsigned int value); } + SYS_KSEM_UNLINK = 406 // { int ksem_unlink(const char *name); } + SYS_KSEM_GETVALUE = 407 // { int ksem_getvalue(semid_t id, int *val); } + SYS_KSEM_DESTROY = 408 // { int ksem_destroy(semid_t id); } + SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } + SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } + SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } + SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } + SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } + SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); } + SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); } + SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } + SYS_SWAPOFF = 424 // { int swapoff(const char *name); } + SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, acl_type_t type); } + SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, int *sig); } + SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, int flags); } + SYS_THR_EXIT = 431 // { void thr_exit(long *state); } + SYS_THR_SELF = 432 // { int thr_self(long *id); } + SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } + SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } + SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } + SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); } + SYS_THR_WAKE = 443 // { int thr_wake(long id); } + SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } + SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } + SYS_AUDITON = 446 // { int auditon(int cmd, void *data, u_int length); } + SYS_GETAUID = 447 // { int getauid(uid_t *auid); } + SYS_SETAUID = 448 // { int setauid(uid_t *auid); } + SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } + 
SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_AUDITCTL = 453 // { int auditctl(char *path); } + SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } + SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } + SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } + SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } + SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } + SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } + SYS_AIO_FSYNC = 465 // { int aio_fsync(int op, struct aiocb *aiocbp); } + SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, lwpid_t lwpid, struct rtprio *rtp); } + SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } + SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } + SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } + SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } + SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } + SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, int whence); } + SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } + SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } + SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } + SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, mode_t mode); } + SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } + SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } + SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, cpusetid_t setid); } + SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, cpuwhich_t which, id_t id, cpusetid_t *setid); } + SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *mask); } + SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, const cpuset_t *mask); } + SYS_FACCESSAT = 489 // { int faccessat(int fd, char 
*path, int amode, int flag); } + SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } + SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } + SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } + SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } + SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } + SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } + SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } + SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } + SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } + SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } + SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } + SYS_GSSD_SYSCALL = 505 // { int gssd_syscall(char *path); } + SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, unsigned int iovcnt, int flags); } + SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, unsigned int iovcnt, int flags); } + SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } + SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } + SYS___SEMCTL = 510 // { int __semctl(int semid, int semnum, int cmd, union semun *arg); } + SYS_MSGCTL = 511 // { int msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_SHMCTL = 512 // { int shmctl(int shmid, int cmd, struct shmid_ds *buf); } + SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } + SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, int fd, cap_rights_t *rightsp); } + SYS_CAP_ENTER = 516 // { int cap_enter(void); } + SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } + SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } + SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } + SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } + SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *sm); } + SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, size_t namelen); } + SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } + SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, off_t offset, off_t len); } + SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, off_t len, int advice); } + SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, int *status, int options, struct __wrusage *wrusage, siginfo_t *info); } + SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, cap_rights_t *rightsp); } + SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, const u_long 
*cmds, size_t ncmds); } + SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, u_long *cmds, size_t maxcmds); } + SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, uint32_t fcntlrights); } + SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, uint32_t *fcntlrightsp); } + SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, int namelen); } + SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, int namelen); } + SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, u_long flags, int atflag); } + SYS_ACCEPT4 = 541 // { int accept4(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen, int flags); } + SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } + SYS_AIO_MLOCK = 543 // { int aio_mlock(struct aiocb *aiocbp); } + SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, int com, void *data); } + SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } + SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } + SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } + SYS_FDATASYNC = 550 // { int fdatasync(int fd); } + SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); } + SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); } + SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); } + SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); } + SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); } + SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); } + SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); } + SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); } + SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); } + SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); } +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index cac1f758b..c9c4ad031 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -m32 /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static 
-I/tmp/386/include -m32 /tmp/386/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux @@ -446,4 +446,5 @@ const ( SYS_MEMFD_SECRET = 447 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f327e4a0b..12ff3417c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -m64 /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/amd64/include -m64 /tmp/amd64/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux @@ -368,4 +368,5 @@ const ( SYS_MEMFD_SECRET = 447 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index fb06a08d4..c3fb5e77a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/arm/include /tmp/arm/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux @@ -410,4 +410,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 58285646e..358c847a4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/arm64/include -fsigned-char /tmp/arm64/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux @@ -313,4 +313,5 @@ const ( SYS_MEMFD_SECRET = 447 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go new file mode 100644 index 000000000..81c4849b1 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -0,0 +1,311 @@ +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/loong64/include /tmp/loong64/include/asm/unistd.h +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build loong64 && linux +// +build loong64,linux + +package unix + +const ( + SYS_IO_SETUP = 0 + SYS_IO_DESTROY = 1 + SYS_IO_SUBMIT = 2 + SYS_IO_CANCEL = 3 + SYS_IO_GETEVENTS = 4 + SYS_SETXATTR = 5 + SYS_LSETXATTR = 6 + SYS_FSETXATTR = 7 + SYS_GETXATTR = 8 + SYS_LGETXATTR = 9 + SYS_FGETXATTR = 10 + SYS_LISTXATTR = 11 + SYS_LLISTXATTR = 12 + SYS_FLISTXATTR = 13 + SYS_REMOVEXATTR = 14 + SYS_LREMOVEXATTR = 15 + SYS_FREMOVEXATTR = 16 + SYS_GETCWD = 17 + SYS_LOOKUP_DCOOKIE = 18 + SYS_EVENTFD2 = 19 + SYS_EPOLL_CREATE1 = 20 + SYS_EPOLL_CTL = 21 + SYS_EPOLL_PWAIT = 22 + SYS_DUP = 23 + SYS_DUP3 = 24 + SYS_FCNTL = 25 + SYS_INOTIFY_INIT1 = 26 + SYS_INOTIFY_ADD_WATCH = 27 + SYS_INOTIFY_RM_WATCH = 28 + SYS_IOCTL = 29 + SYS_IOPRIO_SET = 30 + SYS_IOPRIO_GET = 31 + SYS_FLOCK = 32 + SYS_MKNODAT = 33 + SYS_MKDIRAT = 34 + SYS_UNLINKAT = 35 + SYS_SYMLINKAT = 36 + SYS_LINKAT = 37 + SYS_UMOUNT2 = 39 + SYS_MOUNT = 40 + SYS_PIVOT_ROOT = 41 + SYS_NFSSERVCTL = 42 + SYS_STATFS = 43 + SYS_FSTATFS = 44 + SYS_TRUNCATE = 45 + SYS_FTRUNCATE = 46 + SYS_FALLOCATE = 47 + SYS_FACCESSAT = 48 + SYS_CHDIR = 49 + SYS_FCHDIR = 50 + SYS_CHROOT = 51 + SYS_FCHMOD = 52 + SYS_FCHMODAT = 53 + SYS_FCHOWNAT = 54 + SYS_FCHOWN = 55 + SYS_OPENAT = 56 + SYS_CLOSE = 57 + SYS_VHANGUP = 58 + SYS_PIPE2 = 59 + SYS_QUOTACTL = 60 + SYS_GETDENTS64 = 61 + SYS_LSEEK = 62 + SYS_READ = 63 + SYS_WRITE = 64 + SYS_READV = 65 + SYS_WRITEV = 66 + SYS_PREAD64 = 67 + SYS_PWRITE64 = 68 + SYS_PREADV = 69 + SYS_PWRITEV = 70 + SYS_SENDFILE = 71 + SYS_PSELECT6 = 72 + SYS_PPOLL = 73 + SYS_SIGNALFD4 = 74 + SYS_VMSPLICE = 75 + SYS_SPLICE = 76 + SYS_TEE = 77 + SYS_READLINKAT = 78 + SYS_SYNC = 81 + SYS_FSYNC = 82 + SYS_FDATASYNC = 83 + SYS_SYNC_FILE_RANGE = 84 + SYS_TIMERFD_CREATE = 85 + SYS_TIMERFD_SETTIME = 86 + SYS_TIMERFD_GETTIME = 87 + SYS_UTIMENSAT = 88 + SYS_ACCT = 89 + SYS_CAPGET = 90 + SYS_CAPSET = 91 + SYS_PERSONALITY = 92 + SYS_EXIT = 93 + SYS_EXIT_GROUP = 94 + SYS_WAITID = 95 + SYS_SET_TID_ADDRESS = 96 + SYS_UNSHARE = 97 + SYS_FUTEX = 98 + SYS_SET_ROBUST_LIST = 99 + SYS_GET_ROBUST_LIST = 100 + SYS_NANOSLEEP = 101 + SYS_GETITIMER = 102 + SYS_SETITIMER = 103 + SYS_KEXEC_LOAD = 104 + SYS_INIT_MODULE = 105 + SYS_DELETE_MODULE = 106 + SYS_TIMER_CREATE = 107 + SYS_TIMER_GETTIME = 108 + SYS_TIMER_GETOVERRUN = 109 + SYS_TIMER_SETTIME = 110 + SYS_TIMER_DELETE = 111 + SYS_CLOCK_SETTIME = 112 + SYS_CLOCK_GETTIME = 113 + SYS_CLOCK_GETRES = 114 + SYS_CLOCK_NANOSLEEP = 115 + SYS_SYSLOG = 116 + SYS_PTRACE = 117 + SYS_SCHED_SETPARAM = 118 + SYS_SCHED_SETSCHEDULER = 119 + SYS_SCHED_GETSCHEDULER = 120 + SYS_SCHED_GETPARAM = 121 + SYS_SCHED_SETAFFINITY = 122 + SYS_SCHED_GETAFFINITY = 123 + SYS_SCHED_YIELD = 124 + SYS_SCHED_GET_PRIORITY_MAX = 125 + SYS_SCHED_GET_PRIORITY_MIN = 126 + SYS_SCHED_RR_GET_INTERVAL = 127 + SYS_RESTART_SYSCALL = 128 + SYS_KILL = 129 + SYS_TKILL = 130 + SYS_TGKILL = 131 + SYS_SIGALTSTACK = 132 + SYS_RT_SIGSUSPEND = 133 + SYS_RT_SIGACTION = 134 + SYS_RT_SIGPROCMASK = 135 + SYS_RT_SIGPENDING = 136 + SYS_RT_SIGTIMEDWAIT = 137 + SYS_RT_SIGQUEUEINFO = 138 + SYS_RT_SIGRETURN = 139 + SYS_SETPRIORITY = 140 + SYS_GETPRIORITY = 141 + SYS_REBOOT = 142 + SYS_SETREGID = 143 + SYS_SETGID = 144 + SYS_SETREUID = 145 + SYS_SETUID = 146 + SYS_SETRESUID = 147 + SYS_GETRESUID = 148 + SYS_SETRESGID = 149 + SYS_GETRESGID = 150 + SYS_SETFSUID = 151 + SYS_SETFSGID = 152 + SYS_TIMES = 153 + SYS_SETPGID = 154 + SYS_GETPGID = 155 + SYS_GETSID = 156 + SYS_SETSID = 157 + SYS_GETGROUPS = 158 + SYS_SETGROUPS = 159 + SYS_UNAME = 160 + SYS_SETHOSTNAME = 161 + SYS_SETDOMAINNAME = 
162 + SYS_GETRUSAGE = 165 + SYS_UMASK = 166 + SYS_PRCTL = 167 + SYS_GETCPU = 168 + SYS_GETTIMEOFDAY = 169 + SYS_SETTIMEOFDAY = 170 + SYS_ADJTIMEX = 171 + SYS_GETPID = 172 + SYS_GETPPID = 173 + SYS_GETUID = 174 + SYS_GETEUID = 175 + SYS_GETGID = 176 + SYS_GETEGID = 177 + SYS_GETTID = 178 + SYS_SYSINFO = 179 + SYS_MQ_OPEN = 180 + SYS_MQ_UNLINK = 181 + SYS_MQ_TIMEDSEND = 182 + SYS_MQ_TIMEDRECEIVE = 183 + SYS_MQ_NOTIFY = 184 + SYS_MQ_GETSETATTR = 185 + SYS_MSGGET = 186 + SYS_MSGCTL = 187 + SYS_MSGRCV = 188 + SYS_MSGSND = 189 + SYS_SEMGET = 190 + SYS_SEMCTL = 191 + SYS_SEMTIMEDOP = 192 + SYS_SEMOP = 193 + SYS_SHMGET = 194 + SYS_SHMCTL = 195 + SYS_SHMAT = 196 + SYS_SHMDT = 197 + SYS_SOCKET = 198 + SYS_SOCKETPAIR = 199 + SYS_BIND = 200 + SYS_LISTEN = 201 + SYS_ACCEPT = 202 + SYS_CONNECT = 203 + SYS_GETSOCKNAME = 204 + SYS_GETPEERNAME = 205 + SYS_SENDTO = 206 + SYS_RECVFROM = 207 + SYS_SETSOCKOPT = 208 + SYS_GETSOCKOPT = 209 + SYS_SHUTDOWN = 210 + SYS_SENDMSG = 211 + SYS_RECVMSG = 212 + SYS_READAHEAD = 213 + SYS_BRK = 214 + SYS_MUNMAP = 215 + SYS_MREMAP = 216 + SYS_ADD_KEY = 217 + SYS_REQUEST_KEY = 218 + SYS_KEYCTL = 219 + SYS_CLONE = 220 + SYS_EXECVE = 221 + SYS_MMAP = 222 + SYS_FADVISE64 = 223 + SYS_SWAPON = 224 + SYS_SWAPOFF = 225 + SYS_MPROTECT = 226 + SYS_MSYNC = 227 + SYS_MLOCK = 228 + SYS_MUNLOCK = 229 + SYS_MLOCKALL = 230 + SYS_MUNLOCKALL = 231 + SYS_MINCORE = 232 + SYS_MADVISE = 233 + SYS_REMAP_FILE_PAGES = 234 + SYS_MBIND = 235 + SYS_GET_MEMPOLICY = 236 + SYS_SET_MEMPOLICY = 237 + SYS_MIGRATE_PAGES = 238 + SYS_MOVE_PAGES = 239 + SYS_RT_TGSIGQUEUEINFO = 240 + SYS_PERF_EVENT_OPEN = 241 + SYS_ACCEPT4 = 242 + SYS_RECVMMSG = 243 + SYS_ARCH_SPECIFIC_SYSCALL = 244 + SYS_WAIT4 = 260 + SYS_PRLIMIT64 = 261 + SYS_FANOTIFY_INIT = 262 + SYS_FANOTIFY_MARK = 263 + SYS_NAME_TO_HANDLE_AT = 264 + SYS_OPEN_BY_HANDLE_AT = 265 + SYS_CLOCK_ADJTIME = 266 + SYS_SYNCFS = 267 + SYS_SETNS = 268 + SYS_SENDMMSG = 269 + SYS_PROCESS_VM_READV = 270 + SYS_PROCESS_VM_WRITEV = 271 + SYS_KCMP = 272 + SYS_FINIT_MODULE = 273 + SYS_SCHED_SETATTR = 274 + SYS_SCHED_GETATTR = 275 + SYS_RENAMEAT2 = 276 + SYS_SECCOMP = 277 + SYS_GETRANDOM = 278 + SYS_MEMFD_CREATE = 279 + SYS_BPF = 280 + SYS_EXECVEAT = 281 + SYS_USERFAULTFD = 282 + SYS_MEMBARRIER = 283 + SYS_MLOCK2 = 284 + SYS_COPY_FILE_RANGE = 285 + SYS_PREADV2 = 286 + SYS_PWRITEV2 = 287 + SYS_PKEY_MPROTECT = 288 + SYS_PKEY_ALLOC = 289 + SYS_PKEY_FREE = 290 + SYS_STATX = 291 + SYS_IO_PGETEVENTS = 292 + SYS_RSEQ = 293 + SYS_KEXEC_FILE_LOAD = 294 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 3b0418e68..202a57e90 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run 
linux/mksysnum.go -Wall -Werror -static -I/tmp/mips/include /tmp/mips/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux @@ -430,4 +430,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 4446 SYS_PROCESS_MRELEASE = 4448 SYS_FUTEX_WAITV = 4449 + SYS_SET_MEMPOLICY_HOME_NODE = 4450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 314ebf166..1fbceb52d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/mips64/include /tmp/mips64/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux @@ -360,4 +360,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 5446 SYS_PROCESS_MRELEASE = 5448 SYS_FUTEX_WAITV = 5449 + SYS_SET_MEMPOLICY_HOME_NODE = 5450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index b8fbb937a..b4ffb7a20 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/mips64le/include /tmp/mips64le/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux @@ -360,4 +360,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 5446 SYS_PROCESS_MRELEASE = 5448 SYS_FUTEX_WAITV = 5449 + SYS_SET_MEMPOLICY_HOME_NODE = 5450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index ee309b2ba..867985f9b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/mipsle/include /tmp/mipsle/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux @@ -430,4 +430,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 4446 SYS_PROCESS_MRELEASE = 4448 SYS_FUTEX_WAITV = 4449 + SYS_SET_MEMPOLICY_HOME_NODE = 4450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index ac3748104..a8cce69ed 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/ppc/include /tmp/ppc/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc && linux @@ -437,4 +437,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 5aa472111..d44c5b39d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/ppc64/include /tmp/ppc64/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux @@ -409,4 +409,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 0793ac1a6..4214dd9c0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/ppc64le/include /tmp/ppc64le/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux @@ -409,4 +409,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index a520962e3..3e594a8c0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/riscv64/include /tmp/riscv64/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux @@ -309,6 +309,8 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_MEMFD_SECRET = 447 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index d1738586b..7ea465204 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/s390x/include -fsigned-char /tmp/s390x/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build s390x && linux @@ -374,4 +374,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index dfd5660f9..92f628ef4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/sparc64/include /tmp/sparc64/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux @@ -388,4 +388,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 885842c0e..e2a64f099 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -366,30 +366,57 @@ type ICMPv6Filter struct { Filt [8]uint32 } +type TCPConnectionInfo struct { + State uint8 + Snd_wscale uint8 + Rcv_wscale uint8 + _ uint8 + Options uint32 + Flags uint32 + Rto uint32 + Maxseg uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Snd_wnd uint32 + Snd_sbbytes uint32 + Rcv_wnd uint32 + Rttcur uint32 + Srtt uint32 + Rttvar uint32 + Txpackets uint64 + Txbytes uint64 + Txretransmitbytes uint64 + Rxpackets uint64 + Rxbytes uint64 + Rxoutoforderbytes uint64 + Txretransmitpackets uint64 +} + const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x6c - SizeofSockaddrUnix = 0x6a - SizeofSockaddrDatalink = 0x14 - SizeofSockaddrCtl = 0x20 - SizeofSockaddrVM = 0xc - SizeofXvsockpcb = 0xa8 - SizeofXSocket = 0x64 - SizeofXSockbuf = 0x18 - SizeofXVSockPgen = 0x20 - SizeofXucred = 0x4c - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofMsghdr = 0x30 - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofSockaddrCtl = 0x20 + SizeofSockaddrVM = 0xc + SizeofXvsockpcb = 0xa8 + SizeofXSocket = 0x64 + SizeofXSockbuf = 0x18 + SizeofXVSockPgen = 0x20 + SizeofXucred = 0x4c + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofTCPConnectionInfo = 0x70 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index b23c02337..34aa77521 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -366,30 +366,57 @@ type ICMPv6Filter struct { Filt [8]uint32 } +type TCPConnectionInfo struct { + State uint8 + Snd_wscale uint8 + Rcv_wscale uint8 + _ uint8 + Options uint32 + Flags uint32 + Rto uint32 + Maxseg uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Snd_wnd uint32 + Snd_sbbytes uint32 + Rcv_wnd uint32 + Rttcur uint32 + Srtt uint32 + Rttvar uint32 + 
Txpackets uint64 + Txbytes uint64 + Txretransmitbytes uint64 + Rxpackets uint64 + Rxbytes uint64 + Rxoutoforderbytes uint64 + Txretransmitpackets uint64 +} + const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x6c - SizeofSockaddrUnix = 0x6a - SizeofSockaddrDatalink = 0x14 - SizeofSockaddrCtl = 0x20 - SizeofSockaddrVM = 0xc - SizeofXvsockpcb = 0xa8 - SizeofXSocket = 0x64 - SizeofXSockbuf = 0x18 - SizeofXVSockPgen = 0x20 - SizeofXucred = 0x4c - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofMsghdr = 0x30 - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofSockaddrCtl = 0x20 + SizeofSockaddrVM = 0xc + SizeofXvsockpcb = 0xa8 + SizeofXSocket = 0x64 + SizeofXSockbuf = 0x18 + SizeofXVSockPgen = 0x20 + SizeofXucred = 0x4c + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofTCPConnectionInfo = 0x70 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 4eec078e5..dea0c9a60 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -90,27 +90,6 @@ type Stat_t struct { Spare [10]uint64 } -type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec - _ [8]byte -} - type Statfs_t struct { Version uint32 Type uint32 @@ -136,31 +115,6 @@ type Statfs_t struct { Mntonname [1024]byte } -type statfs_freebsd11_t struct { - Version uint32 - Type uint32 - Flags uint64 - Bsize uint64 - Iosize uint64 - Blocks uint64 - Bfree uint64 - Bavail int64 - Files uint64 - Ffree int64 - Syncwrites uint64 - Asyncwrites uint64 - Syncreads uint64 - Asyncreads uint64 - Spare [10]uint64 - Namemax uint32 - Owner uint32 - Fsid Fsid - Charspare [80]int8 - Fstypename [16]byte - Mntfromname [88]byte - Mntonname [88]byte -} - type Flock_t struct { Start int64 Len int64 @@ -181,14 +135,6 @@ type Dirent struct { Name [256]int8 } -type dirent_freebsd11 struct { - Fileno uint32 - Reclen uint16 - Type uint8 - Namlen uint8 - Name [256]int8 -} - type Fsid struct { Val [2]int32 } @@ -337,41 +283,9 @@ const ( ) const ( - PTRACE_ATTACH = 0xa - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0xb - PTRACE_GETFPREGS = 0x23 - PTRACE_GETFSBASE = 0x47 - PTRACE_GETLWPLIST = 0xf - PTRACE_GETNUMLWPS = 0xe - PTRACE_GETREGS = 0x21 - PTRACE_GETXSTATE = 0x45 - PTRACE_IO = 0xc - PTRACE_KILL = 0x8 - PTRACE_LWPEVENTS = 0x18 - PTRACE_LWPINFO = 0xd - PTRACE_SETFPREGS = 0x24 - PTRACE_SETREGS = 0x22 - PTRACE_SINGLESTEP = 0x9 - PTRACE_TRACEME = 0x0 -) - -const ( - PIOD_READ_D = 0x1 - PIOD_WRITE_D = 0x2 - PIOD_READ_I = 0x3 - PIOD_WRITE_I = 0x4 -) - -const ( - PL_FLAG_BORN = 0x100 - PL_FLAG_EXITED = 0x200 - PL_FLAG_SI = 0x20 -) - -const ( - TRAP_BRKPT = 0x1 - TRAP_TRACE = 0x2 + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 ) type PtraceLwpInfoStruct struct { @@ 
-432,6 +346,8 @@ type FpReg struct { Pad [64]uint8 } +type FpExtendedPrecision struct{} + type PtraceIoDesc struct { Op int32 Offs *byte @@ -444,8 +360,9 @@ type Kevent_t struct { Filter int16 Flags uint16 Fflags uint32 - Data int32 + Data int64 Udata *byte + Ext [4]uint64 } type FdSet struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7622904a5..da0ea0d60 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -86,26 +86,6 @@ type Stat_t struct { Spare [10]uint64 } -type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec -} - type Statfs_t struct { Version uint32 Type uint32 @@ -131,31 +111,6 @@ type Statfs_t struct { Mntonname [1024]byte } -type statfs_freebsd11_t struct { - Version uint32 - Type uint32 - Flags uint64 - Bsize uint64 - Iosize uint64 - Blocks uint64 - Bfree uint64 - Bavail int64 - Files uint64 - Ffree int64 - Syncwrites uint64 - Asyncwrites uint64 - Syncreads uint64 - Asyncreads uint64 - Spare [10]uint64 - Namemax uint32 - Owner uint32 - Fsid Fsid - Charspare [80]int8 - Fstypename [16]byte - Mntfromname [88]byte - Mntonname [88]byte -} - type Flock_t struct { Start int64 Len int64 @@ -177,14 +132,6 @@ type Dirent struct { Name [256]int8 } -type dirent_freebsd11 struct { - Fileno uint32 - Reclen uint16 - Type uint8 - Namlen uint8 - Name [256]int8 -} - type Fsid struct { Val [2]int32 } @@ -333,41 +280,9 @@ const ( ) const ( - PTRACE_ATTACH = 0xa - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0xb - PTRACE_GETFPREGS = 0x23 - PTRACE_GETFSBASE = 0x47 - PTRACE_GETLWPLIST = 0xf - PTRACE_GETNUMLWPS = 0xe - PTRACE_GETREGS = 0x21 - PTRACE_GETXSTATE = 0x45 - PTRACE_IO = 0xc - PTRACE_KILL = 0x8 - PTRACE_LWPEVENTS = 0x18 - PTRACE_LWPINFO = 0xd - PTRACE_SETFPREGS = 0x24 - PTRACE_SETREGS = 0x22 - PTRACE_SINGLESTEP = 0x9 - PTRACE_TRACEME = 0x0 -) - -const ( - PIOD_READ_D = 0x1 - PIOD_WRITE_D = 0x2 - PIOD_READ_I = 0x3 - PIOD_WRITE_I = 0x4 -) - -const ( - PL_FLAG_BORN = 0x100 - PL_FLAG_EXITED = 0x200 - PL_FLAG_SI = 0x20 -) - -const ( - TRAP_BRKPT = 0x1 - TRAP_TRACE = 0x2 + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 ) type PtraceLwpInfoStruct struct { @@ -435,6 +350,8 @@ type FpReg struct { Spare [12]uint64 } +type FpExtendedPrecision struct{} + type PtraceIoDesc struct { Op int32 Offs *byte @@ -449,6 +366,7 @@ type Kevent_t struct { Fflags uint32 Data int64 Udata *byte + Ext [4]uint64 } type FdSet struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 19223ce8e..da8f74045 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -33,7 +33,7 @@ type Timeval struct { _ [4]byte } -type Time_t int32 +type Time_t int64 type Rusage struct { Utime Timeval @@ -88,26 +88,6 @@ type Stat_t struct { Spare [10]uint64 } -type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec -} - type Statfs_t struct { Version uint32 Type uint32 @@ -133,31 +113,6 @@ type Statfs_t 
struct { Mntonname [1024]byte } -type statfs_freebsd11_t struct { - Version uint32 - Type uint32 - Flags uint64 - Bsize uint64 - Iosize uint64 - Blocks uint64 - Bfree uint64 - Bavail int64 - Files uint64 - Ffree int64 - Syncwrites uint64 - Asyncwrites uint64 - Syncreads uint64 - Asyncreads uint64 - Spare [10]uint64 - Namemax uint32 - Owner uint32 - Fsid Fsid - Charspare [80]int8 - Fstypename [16]byte - Mntfromname [88]byte - Mntonname [88]byte -} - type Flock_t struct { Start int64 Len int64 @@ -179,14 +134,6 @@ type Dirent struct { Name [256]int8 } -type dirent_freebsd11 struct { - Fileno uint32 - Reclen uint16 - Type uint8 - Namlen uint8 - Name [256]int8 -} - type Fsid struct { Val [2]int32 } @@ -335,41 +282,9 @@ const ( ) const ( - PTRACE_ATTACH = 0xa - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0xb - PTRACE_GETFPREGS = 0x23 - PTRACE_GETFSBASE = 0x47 - PTRACE_GETLWPLIST = 0xf - PTRACE_GETNUMLWPS = 0xe - PTRACE_GETREGS = 0x21 - PTRACE_GETXSTATE = 0x45 - PTRACE_IO = 0xc - PTRACE_KILL = 0x8 - PTRACE_LWPEVENTS = 0x18 - PTRACE_LWPINFO = 0xd - PTRACE_SETFPREGS = 0x24 - PTRACE_SETREGS = 0x22 - PTRACE_SINGLESTEP = 0x9 - PTRACE_TRACEME = 0x0 -) - -const ( - PIOD_READ_D = 0x1 - PIOD_WRITE_D = 0x2 - PIOD_READ_I = 0x3 - PIOD_WRITE_I = 0x4 -) - -const ( - PL_FLAG_BORN = 0x100 - PL_FLAG_EXITED = 0x200 - PL_FLAG_SI = 0x20 -) - -const ( - TRAP_BRKPT = 0x1 - TRAP_TRACE = 0x2 + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 ) type PtraceLwpInfoStruct struct { @@ -386,15 +301,15 @@ type PtraceLwpInfoStruct struct { } type __Siginfo struct { - Signo int32 - Errno int32 - Code int32 - Pid int32 - Uid uint32 - Status int32 - Addr *byte - Value [4]byte - X_reason [32]byte + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [4]byte + _ [32]byte } type Sigset_t struct { @@ -402,16 +317,22 @@ type Sigset_t struct { } type Reg struct { - R [13]uint32 - R_sp uint32 - R_lr uint32 - R_pc uint32 - R_cpsr uint32 + R [13]uint32 + Sp uint32 + Lr uint32 + Pc uint32 + Cpsr uint32 } type FpReg struct { - Fpr_fpsr uint32 - Fpr [8][3]uint32 + Fpsr uint32 + Fpr [8]FpExtendedPrecision +} + +type FpExtendedPrecision struct { + Exponent uint32 + Mantissa_hi uint32 + Mantissa_lo uint32 } type PtraceIoDesc struct { @@ -426,8 +347,11 @@ type Kevent_t struct { Filter int16 Flags uint16 Fflags uint32 - Data int32 + _ [4]byte + Data int64 Udata *byte + _ [4]byte + Ext [4]uint64 } type FdSet struct { @@ -453,7 +377,7 @@ type ifMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Data ifData } @@ -464,7 +388,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Data IfData } @@ -532,7 +455,7 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Metric int32 } @@ -543,7 +466,7 @@ type IfmaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 } type IfAnnounceMsghdr struct { @@ -560,7 +483,7 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte + _ uint16 Flags int32 Addrs int32 Pid int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 8e3e33f67..d69988e5e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -86,26 +86,6 @@ type Stat_t struct { Spare [10]uint64 } -type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim 
Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec -} - type Statfs_t struct { Version uint32 Type uint32 @@ -131,31 +111,6 @@ type Statfs_t struct { Mntonname [1024]byte } -type statfs_freebsd11_t struct { - Version uint32 - Type uint32 - Flags uint64 - Bsize uint64 - Iosize uint64 - Blocks uint64 - Bfree uint64 - Bavail int64 - Files uint64 - Ffree int64 - Syncwrites uint64 - Asyncwrites uint64 - Syncreads uint64 - Asyncreads uint64 - Spare [10]uint64 - Namemax uint32 - Owner uint32 - Fsid Fsid - Charspare [80]int8 - Fstypename [16]byte - Mntfromname [88]byte - Mntonname [88]byte -} - type Flock_t struct { Start int64 Len int64 @@ -177,14 +132,6 @@ type Dirent struct { Name [256]int8 } -type dirent_freebsd11 struct { - Fileno uint32 - Reclen uint16 - Type uint8 - Namlen uint8 - Name [256]int8 -} - type Fsid struct { Val [2]int32 } @@ -333,39 +280,9 @@ const ( ) const ( - PTRACE_ATTACH = 0xa - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0xb - PTRACE_GETFPREGS = 0x23 - PTRACE_GETLWPLIST = 0xf - PTRACE_GETNUMLWPS = 0xe - PTRACE_GETREGS = 0x21 - PTRACE_IO = 0xc - PTRACE_KILL = 0x8 - PTRACE_LWPEVENTS = 0x18 - PTRACE_LWPINFO = 0xd - PTRACE_SETFPREGS = 0x24 - PTRACE_SETREGS = 0x22 - PTRACE_SINGLESTEP = 0x9 - PTRACE_TRACEME = 0x0 -) - -const ( - PIOD_READ_D = 0x1 - PIOD_WRITE_D = 0x2 - PIOD_READ_I = 0x3 - PIOD_WRITE_I = 0x4 -) - -const ( - PL_FLAG_BORN = 0x100 - PL_FLAG_EXITED = 0x200 - PL_FLAG_SI = 0x20 -) - -const ( - TRAP_BRKPT = 0x1 - TRAP_TRACE = 0x2 + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 ) type PtraceLwpInfoStruct struct { @@ -413,6 +330,8 @@ type FpReg struct { _ [8]byte } +type FpExtendedPrecision struct{} + type PtraceIoDesc struct { Op int32 Offs *byte @@ -427,6 +346,7 @@ type Kevent_t struct { Fflags uint32 Data int64 Udata *byte + Ext [4]uint64 } type FdSet struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go new file mode 100644 index 000000000..d6fd9e883 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -0,0 +1,626 @@ +// cgo -godefs -- -fsigned-char types_freebsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
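Note: the FreeBSD hunks above drop the pre-12 compat shims (stat_freebsd11_t, statfs_freebsd11_t, dirent_freebsd11), prune the PTRACE_* set down to TRACEME/CONT/KILL, and widen Kevent_t to the freebsd12 ABI (64-bit Data plus the new Ext [4]uint64); the new ztypes_freebsd_riscv64.go that follows uses the same layout. A minimal sketch of the unchanged kqueue call path against the vendored x/sys/unix (FreeBSD-only; fd 0 is just an arbitrary descriptor):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	kq, err := unix.Kqueue()
	if err != nil {
		fmt.Println("kqueue:", err)
		return
	}
	defer unix.Close(kq)

	// SetKevent fills Ident/Filter/Flags; the new Ext words stay zeroed.
	var ev unix.Kevent_t
	unix.SetKevent(&ev, 0, unix.EVFILT_READ, unix.EV_ADD)
	if _, err := unix.Kevent(kq, []unix.Kevent_t{ev}, nil, nil); err != nil {
		fmt.Println("kevent:", err)
	}
}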
+ +//go:build riscv64 && freebsd +// +build riscv64,freebsd + +package unix + +const ( + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Time_t int64 + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type _Gid_t uint32 + +const ( + _statfsVersion = 0x20140518 + _dirblksiz = 0x400 +) + +type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + _0 int16 + Uid uint32 + Gid uint32 + _1 int32 + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 +} + +type Statfs_t struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]byte + Mntfromname [1024]byte + Mntonname [1024]byte +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 + Sysid int32 + _ [4]byte +} + +type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Pad0 uint8 + Namlen uint16 + Pad1 uint16 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [46]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 + _ *byte +} + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt 
[8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x36 + SizeofXucred = 0x58 + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type PtraceLwpInfoStruct struct { + Lwpid int32 + Event int32 + Flags int32 + Sigmask Sigset_t + Siglist Sigset_t + Siginfo __Siginfo + Tdname [20]int8 + Child_pid int32 + Syscall_code uint32 + Syscall_narg uint32 +} + +type __Siginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [8]byte + _ [40]byte +} + +type Sigset_t struct { + Val [4]uint32 +} + +type Reg struct { + Ra uint64 + Sp uint64 + Gp uint64 + Tp uint64 + T [7]uint64 + S [12]uint64 + A [8]uint64 + Sepc uint64 + Sstatus uint64 +} + +type FpReg struct { + X [32][2]uint64 + Fcsr uint64 +} + +type FpExtendedPrecision struct{} + +type PtraceIoDesc struct { + Op int32 + Offs *byte + Addr *byte + Len uint64 +} + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte + Ext [4]uint64 +} + +type FdSet struct { + Bits [16]uint64 +} + +const ( + sizeofIfMsghdr = 0xa8 + SizeofIfMsghdr = 0xa8 + sizeofIfData = 0x98 + SizeofIfData = 0x98 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfAnnounceMsghdr = 0x18 + SizeofRtMsghdr = 0x98 + SizeofRtMetrics = 0x70 +) + +type ifMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ uint16 + Data ifData +} + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Data IfData +} + +type ifData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Vhid uint8 + Datalen uint16 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Hwassist uint64 + _ [8]byte + _ [16]byte +} + +type IfData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Spare_char1 uint8 + Spare_char2 uint8 + Datalen uint8 + Mtu uint64 + Metric uint64 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Hwassist uint64 + Epoch int64 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ uint16 + Metric int32 +} + +type IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ uint16 +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Name [16]int8 + What uint16 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ uint16 + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Fmask int32 + Inits uint64 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint64 + Mtu uint64 + Hopcount uint64 + Expire uint64 + Recvpipe uint64 + Sendpipe 
uint64 + Ssthresh uint64 + Rtt uint64 + Rttvar uint64 + Pksent uint64 + Weight uint64 + Nhidx uint64 + Filler [2]uint64 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfZbuf = 0x18 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x20 + SizeofBpfZbufHeader = 0x20 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfZbuf struct { + Bufa *byte + Bufb *byte + Buflen uint64 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [6]byte +} + +type BpfZbufHeader struct { + Kernel_gen uint32 + Kernel_len uint32 + User_gen uint32 + _ [5]uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_EACCESS = 0x100 + AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLINIGNEOF = 0x2000 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type CapRights struct { + Rights [2]uint64 +} + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Spare int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 2c26466e0..ff6881167 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -754,6 +754,25 @@ const ( AT_SYMLINK_NOFOLLOW = 0x100 AT_EACCESS = 0x200 + + OPEN_TREE_CLONE = 0x1 + + MOVE_MOUNT_F_SYMLINKS = 0x1 + MOVE_MOUNT_F_AUTOMOUNTS = 0x2 + MOVE_MOUNT_F_EMPTY_PATH = 0x4 + MOVE_MOUNT_T_SYMLINKS = 0x10 + MOVE_MOUNT_T_AUTOMOUNTS = 0x20 + MOVE_MOUNT_T_EMPTY_PATH = 0x40 + MOVE_MOUNT_SET_GROUP = 0x100 + + FSOPEN_CLOEXEC = 0x1 + + FSPICK_CLOEXEC = 0x1 + FSPICK_SYMLINK_NOFOLLOW = 0x2 + FSPICK_NO_AUTOMOUNT = 0x4 + FSPICK_EMPTY_PATH = 0x8 + + FSMOUNT_CLOEXEC = 0x1 ) type OpenHow struct { @@ -926,6 +945,9 @@ type PerfEventAttr struct { Aux_watermark uint32 Sample_max_stack uint16 _ uint16 + Aux_sample_size uint32 + _ uint32 + Sig_data uint64 } type PerfEventMmapPage struct { @@ -1108,7 +1130,9 @@ const ( PERF_BR_SYSRET = 0x8 PERF_BR_COND_CALL = 0x9 PERF_BR_COND_RET = 0xa - PERF_BR_MAX = 0xb + PERF_BR_ERET = 0xb + PERF_BR_IRQ = 0xc + PERF_BR_MAX = 0xd PERF_SAMPLE_REGS_ABI_NONE = 0x0 PERF_SAMPLE_REGS_ABI_32 = 0x1 PERF_SAMPLE_REGS_ABI_64 = 0x2 @@ -1442,6 +1466,11 @@ const ( IFLA_ALT_IFNAME = 0x35 IFLA_PERM_ADDRESS = 0x36 IFLA_PROTO_DOWN_REASON = 0x37 + IFLA_PARENT_DEV_NAME = 0x38 + IFLA_PARENT_DEV_BUS_NAME = 0x39 + IFLA_GRO_MAX_SIZE = 0x3a + IFLA_TSO_MAX_SIZE = 0x3b + IFLA_TSO_MAX_SEGS = 0x3c IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -2950,7 +2979,7 @@ const ( DEVLINK_CMD_TRAP_POLICER_NEW = 0x47 DEVLINK_CMD_TRAP_POLICER_DEL = 0x48 DEVLINK_CMD_HEALTH_REPORTER_TEST = 0x49 - DEVLINK_CMD_MAX = 0x4d + DEVLINK_CMD_MAX = 0x51 
DEVLINK_PORT_TYPE_NOTSET = 0x0 DEVLINK_PORT_TYPE_AUTO = 0x1 DEVLINK_PORT_TYPE_ETH = 0x2 @@ -3179,7 +3208,7 @@ const ( DEVLINK_ATTR_RATE_NODE_NAME = 0xa8 DEVLINK_ATTR_RATE_PARENT_NODE_NAME = 0xa9 DEVLINK_ATTR_REGION_MAX_SNAPSHOTS = 0xaa - DEVLINK_ATTR_MAX = 0xaa + DEVLINK_ATTR_MAX = 0xae DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 @@ -3619,7 +3648,11 @@ const ( ETHTOOL_A_RINGS_RX_MINI = 0x7 ETHTOOL_A_RINGS_RX_JUMBO = 0x8 ETHTOOL_A_RINGS_TX = 0x9 - ETHTOOL_A_RINGS_MAX = 0x9 + ETHTOOL_A_RINGS_RX_BUF_LEN = 0xa + ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb + ETHTOOL_A_RINGS_CQE_SIZE = 0xc + ETHTOOL_A_RINGS_TX_PUSH = 0xd + ETHTOOL_A_RINGS_MAX = 0xd ETHTOOL_A_CHANNELS_UNSPEC = 0x0 ETHTOOL_A_CHANNELS_HEADER = 0x1 ETHTOOL_A_CHANNELS_RX_MAX = 0x2 @@ -4304,7 +4337,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x133 + NL80211_ATTR_MAX = 0x137 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -4530,7 +4563,7 @@ const ( NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY = 0x3 NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE = 0x5 NL80211_BAND_IFTYPE_ATTR_IFTYPES = 0x1 - NL80211_BAND_IFTYPE_ATTR_MAX = 0x7 + NL80211_BAND_IFTYPE_ATTR_MAX = 0xb NL80211_BAND_S1GHZ = 0x4 NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE = 0x2 NL80211_BITRATE_ATTR_MAX = 0x2 @@ -4645,7 +4678,7 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x92 + NL80211_CMD_MAX = 0x93 NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_NAN_MATCH = 0x78 NL80211_CMD_NEW_BEACON = 0xf @@ -4868,7 +4901,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x19 + NL80211_FREQUENCY_ATTR_MAX = 0x1b NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5235,7 +5268,7 @@ const ( NL80211_RATE_INFO_HE_RU_ALLOC_52 = 0x1 NL80211_RATE_INFO_HE_RU_ALLOC_996 = 0x5 NL80211_RATE_INFO_HE_RU_ALLOC = 0x11 - NL80211_RATE_INFO_MAX = 0x11 + NL80211_RATE_INFO_MAX = 0x16 NL80211_RATE_INFO_MCS = 0x2 NL80211_RATE_INFO_SHORT_GI = 0x4 NL80211_RATE_INFO_VHT_MCS = 0x6 @@ -5532,3 +5565,45 @@ const ( NL80211_WPA_VERSION_2 = 0x2 NL80211_WPA_VERSION_3 = 0x4 ) + +const ( + FRA_UNSPEC = 0x0 + FRA_DST = 0x1 + FRA_SRC = 0x2 + FRA_IIFNAME = 0x3 + FRA_GOTO = 0x4 + FRA_UNUSED2 = 0x5 + FRA_PRIORITY = 0x6 + FRA_UNUSED3 = 0x7 + FRA_UNUSED4 = 0x8 + FRA_UNUSED5 = 0x9 + FRA_FWMARK = 0xa + FRA_FLOW = 0xb + FRA_TUN_ID = 0xc + FRA_SUPPRESS_IFGROUP = 0xd + FRA_SUPPRESS_PREFIXLEN = 0xe + FRA_TABLE = 0xf + FRA_FWMASK = 0x10 + FRA_OIFNAME = 0x11 + FRA_PAD = 0x12 + FRA_L3MDEV = 0x13 + FRA_UID_RANGE = 0x14 + FRA_PROTOCOL = 0x15 + FRA_IP_PROTO = 0x16 + FRA_SPORT_RANGE = 0x17 + FRA_DPORT_RANGE = 0x18 + FR_ACT_UNSPEC = 0x0 + FR_ACT_TO_TBL = 0x1 + FR_ACT_GOTO = 0x2 + FR_ACT_NOP = 0x3 + FR_ACT_RES3 = 0x4 + FR_ACT_RES4 = 0x5 + FR_ACT_BLACKHOLE = 0x6 + FR_ACT_UNREACHABLE = 0x7 + FR_ACT_PROHIBIT = 0x8 +) + +const ( + AUDIT_NLGRP_NONE = 0x0 + AUDIT_NLGRP_READLOG = 0x1 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 531aefab7..263604401 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall 
-Werror -static -I/tmp/include -m32 /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/386/cgo -- -Wall -Werror -static -I/tmp/386/include -m32 linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux @@ -240,6 +240,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -318,6 +322,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 727f664bb..8187489d1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/amd64/cgo -- -Wall -Werror -static -I/tmp/amd64/include -m64 linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux @@ -255,6 +255,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -332,6 +336,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 639141b1e..d1612335f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/arm/cgo -- -Wall -Werror -static -I/tmp/arm/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux @@ -231,6 +231,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -309,6 +313,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 6cb03b1d9..c28e5556b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/arm64/cgo -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
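Note: besides the per-arch regeneration headers, every linux ztypes file in this update gains an OPEN_TREE_CLOEXEC constant (0x80000 on most targets, 0x400000 on sparc64, tracking each ABI's O_CLOEXEC), complementing the OPEN_TREE_CLONE, MOVE_MOUNT_* and FSOPEN/FSPICK/FSMOUNT flags added to ztypes_linux.go above. A sketch of the new-mount-API flow these flags serve, assuming the OpenTree and MoveMount wrappers that accompany these constants in this version of x/sys/unix (/mnt/src and /mnt/dst are placeholder paths; requires CAP_SYS_ADMIN):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Take a detached, close-on-exec, recursive clone of the mount at /mnt/src.
	fd, err := unix.OpenTree(unix.AT_FDCWD, "/mnt/src",
		unix.OPEN_TREE_CLONE|unix.OPEN_TREE_CLOEXEC|unix.AT_RECURSIVE)
	if err != nil {
		fmt.Println("open_tree:", err)
		return
	}
	defer unix.Close(fd)

	// Attach the clone at /mnt/dst; F_EMPTY_PATH says "use fd itself as source".
	if err := unix.MoveMount(fd, "", unix.AT_FDCWD, "/mnt/dst",
		unix.MOVE_MOUNT_F_EMPTY_PATH); err != nil {
		fmt.Println("move_mount:", err)
	}
}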
//go:build arm64 && linux @@ -234,6 +234,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -311,6 +315,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go new file mode 100644 index 000000000..187061f9f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -0,0 +1,685 @@ +// cgo -godefs -objdir=/tmp/loong64/cgo -- -Wall -Werror -static -I/tmp/loong64/include linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build loong64 && linux +// +build loong64,linux + +package unix + +const ( + SizeofPtr = 0x8 + SizeofLong = 0x8 +) + +type ( + _C_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timex struct { + Modes uint32 + Offset int64 + Freq int64 + Maxerror int64 + Esterror int64 + Status int32 + Constant int64 + Precision int64 + Tolerance int64 + Time Timeval + Tick int64 + Ppsfreq int64 + Jitter int64 + Shift int32 + Stabil int64 + Jitcnt int64 + Calcnt int64 + Errcnt int64 + Stbcnt int64 + Tai int32 + _ [44]byte +} + +type Time_t int64 + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Stat_t struct { + Dev uint64 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + _ uint64 + Size int64 + Blksize int32 + _ int32 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + _ [2]int32 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [5]byte +} + +type Flock_t struct { + Type int16 + Whence int16 + Start int64 + Len int64 + Pid int32 + _ [4]byte +} + +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + +const ( + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + _ [4]byte +} + +type Cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + +const ( + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 +) + +const ( + SizeofSockFprog = 0x10 +) + +type PtraceRegs struct { + Regs [32]uint64 + Orig_a0 uint64 + Era uint64 + Badv uint64 + Reserved [10]uint64 +} + +type FdSet struct { + Bits [16]int64 +} + +type Sysinfo_t struct { 
+ Uptime int64 + Loads [3]uint64 + Totalram uint64 + Freeram uint64 + Sharedram uint64 + Bufferram uint64 + Totalswap uint64 + Freeswap uint64 + Procs uint16 + Pad uint16 + Totalhigh uint64 + Freehigh uint64 + Unit uint32 + _ [0]int8 + _ [4]byte +} + +type Ustat_t struct { + Tfree int32 + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + _ [4]byte +} + +type EpollEvent struct { + Events uint32 + _ int32 + Fd int32 + Pad int32 +} + +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + +const ( + POLLRDHUP = 0x2000 +) + +type Sigset_t struct { + Val [16]uint64 +} + +const _C__NSIG = 0x41 + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [19]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Taskstats struct { + Version uint16 + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]int8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + Ac_ppid uint32 + Ac_btime uint32 + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 + Thrashing_count uint64 + Thrashing_delay_total uint64 + Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 +} + +type cpuMask uint64 + +const ( + _NCPUBITS = 0x40 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 
0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]int8 + _ uint64 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + Start uint64 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +type TpacketHdr struct { + Status uint64 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 + _ [4]byte +} + +const ( + SizeofTpacketHdr = 0x20 +) + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} + +type BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 + _ [4]byte +} + +const ( + BLKPG = 0x1269 +) + +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { 
+ Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x800870a1 + PPS_SETPARAMS = 0x400870a2 + PPS_GETCAP = 0x800870a3 + PPS_FETCH = 0xc00870a4 +) + +const ( + PIDFD_NONBLOCK = 0x800 +) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ [0]uint8 + Seq uint16 + _ uint16 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 4a1555455..369129917 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/mips/cgo -- -Wall -Werror -static -I/tmp/mips/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux @@ -236,6 +236,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -314,6 +318,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index e1084926f..7473468d7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/mips64/cgo -- -Wall -Werror -static -I/tmp/mips64/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips64 && linux @@ -237,6 +237,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -314,6 +318,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 8904ac84e..ed9448524 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/mips64le/cgo -- -Wall -Werror -static -I/tmp/mips64le/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux @@ -237,6 +237,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -314,6 +318,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index a1a28cc7d..0892a73a4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/mipsle/cgo -- -Wall -Werror -static -I/tmp/mipsle/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux @@ -236,6 +236,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -314,6 +318,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index abdc53452..e1dd48333 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/ppc/cgo -- -Wall -Werror -static -I/tmp/ppc/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc && linux @@ -243,6 +243,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -321,6 +325,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index f4afbbe00..d9f654c7b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/ppc64/cgo -- -Wall -Werror -static -I/tmp/ppc64/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux @@ -244,6 +244,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -321,6 +325,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index ea0b8406d..74acda9fe 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/ppc64le/cgo -- -Wall -Werror -static -I/tmp/ppc64le/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux @@ -244,6 +244,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -321,6 +325,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 85d0a0d50..50ebe69eb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/riscv64/cgo -- -Wall -Werror -static -I/tmp/riscv64/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && linux @@ -262,6 +262,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -339,6 +343,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 81dd9c22c..75b34c259 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/s390x/cgo -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux @@ -257,6 +257,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -334,6 +338,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 6991b00b5..429c3bf7d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/sparc64/cgo -- -Wall -Werror -static -I/tmp/sparc64/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
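Note: each linux arch file above and below appends the same Taskstats tail (Compact_count through Wpcopy_delay_total). On the 32-bit targets (386, arm, mips, mipsle, ppc) cgo -godefs also emits a _ [4]byte pad after the new Ac_tgid uint32, mirroring the alignment hole in the kernel's C struct so the trailing uint64 counters keep 8-byte offsets. A quick offset check, sketched against a linux build of the vendored package:

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

func main() {
	var ts unix.Taskstats
	// True on 32- and 64-bit targets alike, thanks to the generated pads.
	fmt.Println(unsafe.Offsetof(ts.Ac_tgetime)%8 == 0)
	fmt.Println(unsafe.Offsetof(ts.Wpcopy_delay_total)%8 == 0)
}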
//go:build sparc64 && linux @@ -239,6 +239,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x400000 +) + const ( POLLRDHUP = 0x800 ) @@ -316,6 +320,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index baf5fe650..2ed718ca0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -94,10 +94,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte Pad_cgo_0 [2]byte Mount_info [160]byte } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index e21ae8ecf..b4fb97ebe 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -96,10 +96,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index f190651cd..2c4675040 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -98,10 +98,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index 84747c582..ddee04514 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -94,10 +94,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index ac5c8b637..eb13d4e8b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -94,10 +94,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 
ad4aad279..c1a9b83ad 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -178,7 +178,7 @@ type Linger struct { } type Iovec struct { - Base *int8 + Base *byte Len uint64 } diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go index 855698bb2..75980fd44 100644 --- a/vendor/golang.org/x/sys/windows/exec_windows.go +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -15,11 +15,11 @@ import ( // in http://msdn.microsoft.com/en-us/library/ms880421. // This function returns "" (2 double quotes) if s is empty. // Alternatively, these transformations are done: -// - every back slash (\) is doubled, but only if immediately -// followed by double quote ("); -// - every double quote (") is escaped by back slash (\); -// - finally, s is wrapped with double quotes (arg -> "arg"), -// but only if there is space or tab inside s. +// - every back slash (\) is doubled, but only if immediately +// followed by double quote ("); +// - every double quote (") is escaped by back slash (\); +// - finally, s is wrapped with double quotes (arg -> "arg"), +// but only if there is space or tab inside s. func EscapeArg(s string) string { if len(s) == 0 { return "\"\"" diff --git a/vendor/golang.org/x/sys/windows/svc/service.go b/vendor/golang.org/x/sys/windows/svc/service.go index 5b05c3e33..806baa055 100644 --- a/vendor/golang.org/x/sys/windows/svc/service.go +++ b/vendor/golang.org/x/sys/windows/svc/service.go @@ -6,7 +6,6 @@ // +build windows // Package svc provides everything required to build Windows service. -// package svc import ( diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index ce3075c45..e27913817 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -417,6 +417,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) = psapi.GetModuleInformation //sys GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) = psapi.GetModuleFileNameExW //sys GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) = psapi.GetModuleBaseNameW +//sys QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) = psapi.QueryWorkingSetEx // NT Native APIs //sys rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) = ntdll.RtlNtStatusToDosErrorNoTeb @@ -623,7 +624,6 @@ var ( func getStdHandle(stdhandle uint32) (fd Handle) { r, _ := GetStdHandle(stdhandle) - CloseOnExec(r) return r } @@ -862,6 +862,7 @@ const socket_error = uintptr(^uint32(0)) //sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar +//sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. 
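Note: the //sys stub above is paired with an exported windows.GetBestInterfaceEx wrapper in the next hunk, which marshals any Sockaddr before calling into iphlpapi. A minimal caller, sketched against the signatures shown in this diff (Windows-only; 8.8.8.8 is an arbitrary probe address):

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Ask the stack which interface index it would route to 8.8.8.8 through.
	sa := &windows.SockaddrInet4{Addr: [4]byte{8, 8, 8, 8}}
	var idx uint32
	if err := windows.GetBestInterfaceEx(sa, &idx); err != nil {
		fmt.Println("GetBestInterfaceEx:", err)
		return
	}
	fmt.Println("best interface index:", idx)
}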
@@ -971,6 +972,32 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { return unsafe.Pointer(&sa.raw), sl, nil } +type RawSockaddrBth struct { + AddressFamily [2]byte + BtAddr [8]byte + ServiceClassId [16]byte + Port [4]byte +} + +type SockaddrBth struct { + BtAddr uint64 + ServiceClassId GUID + Port uint32 + + raw RawSockaddrBth +} + +func (sa *SockaddrBth) sockaddr() (unsafe.Pointer, int32, error) { + family := AF_BTH + sa.raw = RawSockaddrBth{ + AddressFamily: *(*[2]byte)(unsafe.Pointer(&family)), + BtAddr: *(*[8]byte)(unsafe.Pointer(&sa.BtAddr)), + Port: *(*[4]byte)(unsafe.Pointer(&sa.Port)), + ServiceClassId: *(*[16]byte)(unsafe.Pointer(&sa.ServiceClassId)), + } + return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil +} + func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { switch rsa.Addr.Family { case AF_UNIX: @@ -1046,6 +1073,14 @@ func Connect(fd Handle, sa Sockaddr) (err error) { return connect(fd, ptr, n) } +func GetBestInterfaceEx(sa Sockaddr, pdwBestIfIndex *uint32) (err error) { + ptr, _, err := sa.sockaddr() + if err != nil { + return err + } + return getBestInterfaceEx(ptr, pdwBestIfIndex) +} + func Getsockname(fd Handle) (sa Sockaddr, err error) { var rsa RawSockaddrAny l := int32(unsafe.Sizeof(rsa)) @@ -1699,3 +1734,71 @@ func LoadResourceData(module, resInfo Handle) (data []byte, err error) { h.Cap = int(size) return } + +// PSAPI_WORKING_SET_EX_BLOCK contains extended working set information for a page. +type PSAPI_WORKING_SET_EX_BLOCK uint64 + +// Valid returns the validity of this page. +// If this bit is 1, the subsequent members are valid; otherwise they should be ignored. +func (b PSAPI_WORKING_SET_EX_BLOCK) Valid() bool { + return (b & 1) == 1 +} + +// ShareCount is the number of processes that share this page. The maximum value of this member is 7. +func (b PSAPI_WORKING_SET_EX_BLOCK) ShareCount() uint64 { + return b.intField(1, 3) +} + +// Win32Protection is the memory protection attributes of the page. For a list of values, see +// https://docs.microsoft.com/en-us/windows/win32/memory/memory-protection-constants +func (b PSAPI_WORKING_SET_EX_BLOCK) Win32Protection() uint64 { + return b.intField(4, 11) +} + +// Shared returns the shared status of this page. +// If this bit is 1, the page can be shared. +func (b PSAPI_WORKING_SET_EX_BLOCK) Shared() bool { + return (b & (1 << 15)) == 1 +} + +// Node is the NUMA node. The maximum value of this member is 63. +func (b PSAPI_WORKING_SET_EX_BLOCK) Node() uint64 { + return b.intField(16, 6) +} + +// Locked returns the locked status of this page. +// If this bit is 1, the virtual page is locked in physical memory. +func (b PSAPI_WORKING_SET_EX_BLOCK) Locked() bool { + return (b & (1 << 22)) == 1 +} + +// LargePage returns the large page status of this page. +// If this bit is 1, the page is a large page. +func (b PSAPI_WORKING_SET_EX_BLOCK) LargePage() bool { + return (b & (1 << 23)) == 1 +} + +// Bad returns the bad status of this page. +// If this bit is 1, the page is has been reported as bad. +func (b PSAPI_WORKING_SET_EX_BLOCK) Bad() bool { + return (b & (1 << 31)) == 1 +} + +// intField extracts an integer field in the PSAPI_WORKING_SET_EX_BLOCK union. 
+func (b PSAPI_WORKING_SET_EX_BLOCK) intField(start, length int) uint64 { + var mask PSAPI_WORKING_SET_EX_BLOCK + for pos := start; pos < start+length; pos++ { + mask |= (1 << pos) + } + + masked := b & mask + return uint64(masked >> start) +} + +// PSAPI_WORKING_SET_EX_INFORMATION contains extended working set information for a process. +type PSAPI_WORKING_SET_EX_INFORMATION struct { + // The virtual address. + VirtualAddress Pointer + // A PSAPI_WORKING_SET_EX_BLOCK union that indicates the attributes of the page at VirtualAddress. + VirtualAttributes PSAPI_WORKING_SET_EX_BLOCK +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index e19471c6a..f9eaca528 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -160,6 +160,10 @@ const ( MAX_COMPUTERNAME_LENGTH = 15 + MAX_DHCPV6_DUID_LENGTH = 130 + + MAX_DNS_SUFFIX_STRING_LENGTH = 256 + TIME_ZONE_ID_UNKNOWN = 0 TIME_ZONE_ID_STANDARD = 1 @@ -2000,27 +2004,62 @@ type IpAdapterPrefix struct { } type IpAdapterAddresses struct { - Length uint32 - IfIndex uint32 - Next *IpAdapterAddresses - AdapterName *byte - FirstUnicastAddress *IpAdapterUnicastAddress - FirstAnycastAddress *IpAdapterAnycastAddress - FirstMulticastAddress *IpAdapterMulticastAddress - FirstDnsServerAddress *IpAdapterDnsServerAdapter - DnsSuffix *uint16 - Description *uint16 - FriendlyName *uint16 - PhysicalAddress [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte - PhysicalAddressLength uint32 - Flags uint32 - Mtu uint32 - IfType uint32 - OperStatus uint32 - Ipv6IfIndex uint32 - ZoneIndices [16]uint32 - FirstPrefix *IpAdapterPrefix - /* more fields might be present here. */ + Length uint32 + IfIndex uint32 + Next *IpAdapterAddresses + AdapterName *byte + FirstUnicastAddress *IpAdapterUnicastAddress + FirstAnycastAddress *IpAdapterAnycastAddress + FirstMulticastAddress *IpAdapterMulticastAddress + FirstDnsServerAddress *IpAdapterDnsServerAdapter + DnsSuffix *uint16 + Description *uint16 + FriendlyName *uint16 + PhysicalAddress [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte + PhysicalAddressLength uint32 + Flags uint32 + Mtu uint32 + IfType uint32 + OperStatus uint32 + Ipv6IfIndex uint32 + ZoneIndices [16]uint32 + FirstPrefix *IpAdapterPrefix + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + FirstWinsServerAddress *IpAdapterWinsServerAddress + FirstGatewayAddress *IpAdapterGatewayAddress + Ipv4Metric uint32 + Ipv6Metric uint32 + Luid uint64 + Dhcpv4Server SocketAddress + CompartmentId uint32 + NetworkGuid GUID + ConnectionType uint32 + TunnelType uint32 + Dhcpv6Server SocketAddress + Dhcpv6ClientDuid [MAX_DHCPV6_DUID_LENGTH]byte + Dhcpv6ClientDuidLength uint32 + Dhcpv6Iaid uint32 + FirstDnsSuffix *IpAdapterDNSSuffix +} + +type IpAdapterWinsServerAddress struct { + Length uint32 + Reserved uint32 + Next *IpAdapterWinsServerAddress + Address SocketAddress +} + +type IpAdapterGatewayAddress struct { + Length uint32 + Reserved uint32 + Next *IpAdapterGatewayAddress + Address SocketAddress +} + +type IpAdapterDNSSuffix struct { + Next *IpAdapterDNSSuffix + String [MAX_DNS_SUFFIX_STRING_LENGTH]uint16 } const ( diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 68f52c1e6..52d4742cb 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -177,6 +177,7 @@ var ( procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") 
procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") + procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") @@ -407,6 +408,7 @@ var ( procGetModuleBaseNameW = modpsapi.NewProc("GetModuleBaseNameW") procGetModuleFileNameExW = modpsapi.NewProc("GetModuleFileNameExW") procGetModuleInformation = modpsapi.NewProc("GetModuleInformation") + procQueryWorkingSetEx = modpsapi.NewProc("QueryWorkingSetEx") procSubscribeServiceChangeNotifications = modsechost.NewProc("SubscribeServiceChangeNotifications") procUnsubscribeServiceChangeNotifications = modsechost.NewProc("UnsubscribeServiceChangeNotifications") procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") @@ -1539,6 +1541,14 @@ func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { return } +func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) { + r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func GetIfEntry(pIfRow *MibIfRow) (errcode error) { r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) if r0 != 0 { @@ -3495,6 +3505,14 @@ func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb return } +func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) { + r1, _, e1 := syscall.Syscall(procQueryWorkingSetEx.Addr(), 3, uintptr(process), uintptr(pv), uintptr(cb)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callback uintptr, callbackCtx uintptr, subscription *uintptr) (ret error) { ret = procSubscribeServiceChangeNotifications.Find() if ret != nil { diff --git a/vendor/golang.org/x/tools/AUTHORS b/vendor/golang.org/x/tools/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/golang.org/x/tools/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/tools/CONTRIBUTORS b/vendor/golang.org/x/tools/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/golang.org/x/tools/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go index a5c6d6d4f..9fa5aa192 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -22,9 +22,9 @@ import ( // additional whitespace abutting a node to be enclosed by it. // In this example: // -// z := x + y // add them -// <-A-> -// <----B-----> +// z := x + y // add them +// <-A-> +// <----B-----> // // the ast.BinaryExpr(+) node is considered to enclose interval B // even though its [Pos()..End()) is actually only interval A. @@ -43,10 +43,10 @@ import ( // interior whitespace of path[0]. 
// In this example: // -// z := x + y // add them -// <--C--> <---E--> -// ^ -// D +// z := x + y // add them +// <--C--> <---E--> +// ^ +// D // // intervals C, D and E are inexact. C is contained by the // z-assignment statement, because it spans three of its children (:=, @@ -54,12 +54,11 @@ import ( // interior whitespace of the assignment. E is considered interior // whitespace of the BlockStmt containing the assignment. // -// Precondition: [start, end) both lie within the same file as root. -// TODO(adonovan): return (nil, false) in this case and remove precond. -// Requires FileSet; see loader.tokenFileContainsPos. -// -// Postcondition: path is never nil; it always contains at least 'root'. -// +// The resulting path is never empty; it always contains at least the +// 'root' *ast.File. Ideally PathEnclosingInterval would reject +// intervals that lie wholly or partially outside the range of the +// file, but unfortunately ast.File records only the token.Pos of +// the 'package' keyword, but not of the start of the file itself. func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging @@ -135,6 +134,7 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod return false // inexact: overlaps multiple children } + // Ensure [start,end) is nondecreasing. if start > end { start, end = end, start } @@ -162,7 +162,6 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod // tokenNode is a dummy implementation of ast.Node for a single token. // They are used transiently by PathEnclosingInterval but never escape // this package. -// type tokenNode struct { pos token.Pos end token.Pos @@ -183,7 +182,6 @@ func tok(pos token.Pos, len int) ast.Node { // childrenOf returns the direct non-nil children of ast.Node n. // It may include fake ast.Node implementations for bare tokens. // it is not safe to call (e.g.) ast.Walk on such nodes. -// func childrenOf(n ast.Node) []ast.Node { var children []ast.Node @@ -488,7 +486,6 @@ func (sl byPos) Swap(i, j int) { // TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, // StarExpr) we could be much more specific given the path to the AST // root. Perhaps we should do that. -// func NodeDescription(n ast.Node) string { switch n := n.(type) { case *ast.ArrayType: diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go index 2087ceec9..18d1adb05 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -22,8 +22,11 @@ func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) { // If name is not empty, it is used to rename the import. 
// // For example, calling +// // AddNamedImport(fset, f, "pathpkg", "path") +// // adds +// // import pathpkg "path" func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) { if imports(f, name, path) { @@ -270,8 +273,8 @@ func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (del } if j > 0 { lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) - lastLine := fset.Position(lastImpspec.Path.ValuePos).Line - line := fset.Position(impspec.Path.ValuePos).Line + lastLine := fset.PositionFor(lastImpspec.Path.ValuePos, false).Line + line := fset.PositionFor(impspec.Path.ValuePos, false).Line // We deleted an entry but now there may be // a blank line-sized hole where the import was. diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go index 6d9ca23e2..f430b21b9 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -41,7 +41,6 @@ type ApplyFunc func(*Cursor) bool // Children are traversed in the order in which they appear in the // respective node's struct definition. A package's files are // traversed in the filenames' alphabetical order. -// func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) { parent := &struct{ ast.Node }{root} defer func() { @@ -65,8 +64,8 @@ var abort = new(int) // singleton, to signal termination of Apply // c.Parent(), and f is the field identifier with name c.Name(), // the following invariants hold: // -// p.f == c.Node() if c.Index() < 0 -// p.f[c.Index()] == c.Node() if c.Index() >= 0 +// p.f == c.Node() if c.Index() < 0 +// p.f[c.Index()] == c.Node() if c.Index() >= 0 // // The methods Replace, Delete, InsertBefore, and InsertAfter // can be used to change the AST without disrupting Apply. @@ -294,6 +293,9 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast. a.apply(n, "Fields", nil, n.Fields) case *ast.FuncType: + if tparams := typeparams.ForFuncType(n); tparams != nil { + a.apply(n, "TypeParams", nil, tparams) + } a.apply(n, "Params", nil, n.Params) a.apply(n, "Results", nil, n.Results) @@ -406,6 +408,9 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast. case *ast.TypeSpec: a.apply(n, "Doc", nil, n.Doc) a.apply(n, "Name", nil, n.Name) + if tparams := typeparams.ForTypeSpec(n); tparams != nil { + a.apply(n, "TypeParams", nil, tparams) + } a.apply(n, "Type", nil, n.Type) a.apply(n, "Comment", nil, n.Comment) diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index cec819d64..2ed25a750 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -17,32 +17,47 @@ // developer tools, which will then be able to consume both Go 1.7 and // Go 1.8 export data files, so they will work before and after the // Go update. (See discussion at https://golang.org/issue/15651.) -// package gcexportdata // import "golang.org/x/tools/go/gcexportdata" import ( "bufio" "bytes" + "encoding/json" "fmt" "go/token" "go/types" "io" "io/ioutil" + "os/exec" "golang.org/x/tools/go/internal/gcimporter" ) // Find returns the name of an object (.o) or archive (.a) file // containing type information for the specified import path, -// using the workspace layout conventions of go/build. +// using the go command. // If no file was found, an empty filename is returned. 
// // A relative srcDir is interpreted relative to the current working directory. // // Find also returns the package's resolved (canonical) import path, // reflecting the effects of srcDir and vendoring on importPath. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. func Find(importPath, srcDir string) (filename, path string) { - return gcimporter.FindPkg(importPath, srcDir) + cmd := exec.Command("go", "list", "-json", "-export", "--", importPath) + cmd.Dir = srcDir + out, err := cmd.CombinedOutput() + if err != nil { + return "", "" + } + var data struct { + ImportPath string + Export string + } + json.Unmarshal(out, &data) + return data.Export, data.ImportPath } // NewReader returns a reader for the export data section of an object @@ -101,13 +116,29 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // The indexed export format starts with an 'i'; the older // binary export format starts with a 'c', 'd', or 'v' // (from "version"). Select appropriate importer. - if len(data) > 0 && data[0] == 'i' { - _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) - return pkg, err - } + if len(data) > 0 { + switch data[0] { + case 'i': + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err - _, pkg, err := gcimporter.BImportData(fset, imports, data, path) - return pkg, err + case 'v', 'c', 'd': + _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + return pkg, err + + case 'u': + _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path) + } + } + return nil, fmt.Errorf("empty export data for %s", path) } // Write writes encoded type information for the specified package to out. diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go index efe221e7e..37a7247e2 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/importer.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/importer.go @@ -23,6 +23,8 @@ import ( // or to control the FileSet or access the imports map populated during // package loading. // +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { return importer{fset, imports} } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go index 0a3cdb9a3..196cb3f9b 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go @@ -35,16 +35,18 @@ import ( const debugFormat = false // default: false // Current export format version. Increase with each format change. +// // Note: The latest binary (non-indexed) export format is at version 6. -// This exporter is still at level 4, but it doesn't matter since -// the binary importer can handle older versions just fine. 
-// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE -// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMEMTED HERE -// 4: type name objects support type aliases, uses aliasTag -// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) -// 2: removed unused bool in ODCL export (compiler only) -// 1: header format change (more regular), export package for _ struct fields -// 0: Go1.7 encoding +// This exporter is still at level 4, but it doesn't matter since +// the binary importer can handle older versions just fine. +// +// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE +// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE +// 4: type name objects support type aliases, uses aliasTag +// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) +// 2: removed unused bool in ODCL export (compiler only) +// 1: header format change (more regular), export package for _ struct fields +// 0: Go1.7 encoding const exportVersion = 4 // trackAllTypes enables cycle tracking for all types, not just named diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go index 3ab66830d..e96c39600 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go @@ -45,7 +45,6 @@ var pkgExts = [...]string{".a", ".o"} // the build.Default build.Context). A relative srcDir is interpreted // relative to the current working directory. // If no file was found, an empty filename is returned. -// func FindPkg(path, srcDir string) (filename, id string) { if path == "" { return @@ -109,7 +108,6 @@ func FindPkg(path, srcDir string) (filename, id string) { // If packages[id] contains the completely imported package, that package // can be used directly, and there is no need to call this function (but // there is also no harm but for extra time used). -// func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) { // support for parser error handling defer func() { @@ -133,7 +131,6 @@ func ImportData(packages map[string]*types.Package, filename, id string, data io // Import imports a gc-generated package given its import path and srcDir, adds // the corresponding package object to the packages map, and returns the object. // The packages map must contain all packages already imported. -// func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { var rc io.ReadCloser var filename, id string @@ -184,8 +181,9 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func defer rc.Close() var hdr string + var size int64 buf := bufio.NewReader(rc) - if hdr, _, err = FindExportData(buf); err != nil { + if hdr, size, err = FindExportData(buf); err != nil { return } @@ -213,10 +211,27 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func // The indexed export format starts with an 'i'; the older // binary export format starts with a 'c', 'd', or 'v' // (from "version"). Select appropriate importer. 
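// ('i' denotes the indexed format, 'u' the unified format, and 'v',
// 'c', and 'd' versions of the older binary format.)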
- if len(data) > 0 && data[0] == 'i' { - _, pkg, err = IImportData(fset, packages, data[1:], id) - } else { - _, pkg, err = BImportData(fset, packages, data, id) + if len(data) > 0 { + switch data[0] { + case 'i': + _, pkg, err := IImportData(fset, packages, data[1:], id) + return pkg, err + + case 'v', 'c', 'd': + _, pkg, err := BImportData(fset, packages, data, id) + return pkg, err + + case 'u': + _, pkg, err := UImportData(fset, packages, data[1:size], id) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) + } } default: @@ -348,8 +363,9 @@ func (p *parser) expectKeyword(keyword string) { // ---------------------------------------------------------------------------- // Qualified and unqualified names -// PackageId = string_lit . +// parsePackageID parses a PackageId: // +// PackageId = string_lit . func (p *parser) parsePackageID() string { id, err := strconv.Unquote(p.expect(scanner.String)) if err != nil { @@ -363,13 +379,16 @@ func (p *parser) parsePackageID() string { return id } -// PackageName = ident . +// parsePackageName parse a PackageName: // +// PackageName = ident . func (p *parser) parsePackageName() string { return p.expect(scanner.Ident) } -// dotIdentifier = ( ident | '·' ) { ident | int | '·' } . +// parseDotIdent parses a dotIdentifier: +// +// dotIdentifier = ( ident | '·' ) { ident | int | '·' } . func (p *parser) parseDotIdent() string { ident := "" if p.tok != scanner.Int { @@ -386,8 +405,9 @@ func (p *parser) parseDotIdent() string { return ident } -// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) . +// parseQualifiedName parses a QualifiedName: // +// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) . func (p *parser) parseQualifiedName() (id, name string) { p.expect('@') id = p.parsePackageID() @@ -410,7 +430,6 @@ func (p *parser) parseQualifiedName() (id, name string) { // id identifies a package, usually by a canonical package path like // "encoding/json" but possibly by a non-canonical import path like // "./json". -// func (p *parser) getPkg(id, name string) *types.Package { // package unsafe is not in the packages maps - handle explicitly if id == "unsafe" { @@ -446,7 +465,6 @@ func (p *parser) getPkg(id, name string) *types.Package { // parseExportedName is like parseQualifiedName, but // the package id is resolved to an imported *types.Package. -// func (p *parser) parseExportedName() (pkg *types.Package, name string) { id, name := p.parseQualifiedName() pkg = p.getPkg(id, "") @@ -456,8 +474,9 @@ func (p *parser) parseExportedName() (pkg *types.Package, name string) { // ---------------------------------------------------------------------------- // Types -// BasicType = identifier . +// parseBasicType parses a BasicType: // +// BasicType = identifier . func (p *parser) parseBasicType() types.Type { id := p.expect(scanner.Ident) obj := types.Universe.Lookup(id) @@ -468,8 +487,9 @@ func (p *parser) parseBasicType() types.Type { return nil } -// ArrayType = "[" int_lit "]" Type . +// parseArrayType parses an ArrayType: // +// ArrayType = "[" int_lit "]" Type . func (p *parser) parseArrayType(parent *types.Package) types.Type { // "[" already consumed and lookahead known not to be "]" lit := p.expect(scanner.Int) @@ -482,8 +502,9 @@ func (p *parser) parseArrayType(parent *types.Package) types.Type { return types.NewArray(elem, n) } -// MapType = "map" "[" Type "]" Type . 
+// parseMapType parses a MapType: // +// MapType = "map" "[" Type "]" Type . func (p *parser) parseMapType(parent *types.Package) types.Type { p.expectKeyword("map") p.expect('[') @@ -493,7 +514,9 @@ func (p *parser) parseMapType(parent *types.Package) types.Type { return types.NewMap(key, elem) } -// Name = identifier | "?" | QualifiedName . +// parseName parses a Name: +// +// Name = identifier | "?" | QualifiedName . // // For unqualified and anonymous names, the returned package is the parent // package unless parent == nil, in which case the returned package is the @@ -505,7 +528,6 @@ func (p *parser) parseMapType(parent *types.Package) types.Type { // it doesn't exist yet) unless materializePkg is set (which creates an // unnamed package with valid package path). In the latter case, a // subsequent import clause is expected to provide a name for the package. -// func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) { pkg = parent if pkg == nil { @@ -539,8 +561,9 @@ func deref(typ types.Type) types.Type { return typ } -// Field = Name Type [ string_lit ] . +// parseField parses a Field: // +// Field = Name Type [ string_lit ] . func (p *parser) parseField(parent *types.Package) (*types.Var, string) { pkg, name := p.parseName(parent, true) @@ -583,9 +606,10 @@ func (p *parser) parseField(parent *types.Package) (*types.Var, string) { return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag } -// StructType = "struct" "{" [ FieldList ] "}" . -// FieldList = Field { ";" Field } . +// parseStructType parses a StructType: // +// StructType = "struct" "{" [ FieldList ] "}" . +// FieldList = Field { ";" Field } . func (p *parser) parseStructType(parent *types.Package) types.Type { var fields []*types.Var var tags []string @@ -610,8 +634,9 @@ func (p *parser) parseStructType(parent *types.Package) types.Type { return types.NewStruct(fields, tags) } -// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] . +// parseParameter parses a Parameter: // +// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] . func (p *parser) parseParameter() (par *types.Var, isVariadic bool) { _, name := p.parseName(nil, false) // remove gc-specific parameter numbering @@ -635,9 +660,10 @@ func (p *parser) parseParameter() (par *types.Var, isVariadic bool) { return } -// Parameters = "(" [ ParameterList ] ")" . -// ParameterList = { Parameter "," } Parameter . +// parseParameters parses a Parameters: // +// Parameters = "(" [ ParameterList ] ")" . +// ParameterList = { Parameter "," } Parameter . func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) { p.expect('(') for p.tok != ')' && p.tok != scanner.EOF { @@ -658,9 +684,10 @@ func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) { return } -// Signature = Parameters [ Result ] . -// Result = Type | Parameters . +// parseSignature parses a Signature: // +// Signature = Parameters [ Result ] . +// Result = Type | Parameters . func (p *parser) parseSignature(recv *types.Var) *types.Signature { params, isVariadic := p.parseParameters() @@ -677,14 +704,15 @@ func (p *parser) parseSignature(recv *types.Var) *types.Signature { return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic) } -// InterfaceType = "interface" "{" [ MethodList ] "}" . -// MethodList = Method { ";" Method } . -// Method = Name Signature . 
+// parseInterfaceType parses an InterfaceType: +// +// InterfaceType = "interface" "{" [ MethodList ] "}" . +// MethodList = Method { ";" Method } . +// Method = Name Signature . // // The methods of embedded interfaces are always "inlined" // by the compiler and thus embedded interfaces are never // visible in the export data. -// func (p *parser) parseInterfaceType(parent *types.Package) types.Type { var methods []*types.Func @@ -705,8 +733,9 @@ func (p *parser) parseInterfaceType(parent *types.Package) types.Type { return newInterface(methods, nil).Complete() } -// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . +// parseChanType parses a ChanType: // +// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . func (p *parser) parseChanType(parent *types.Package) types.Type { dir := types.SendRecv if p.tok == scanner.Ident { @@ -724,17 +753,18 @@ func (p *parser) parseChanType(parent *types.Package) types.Type { return types.NewChan(dir, elem) } -// Type = -// BasicType | TypeName | ArrayType | SliceType | StructType | -// PointerType | FuncType | InterfaceType | MapType | ChanType | -// "(" Type ")" . +// parseType parses a Type: // -// BasicType = ident . -// TypeName = ExportedName . -// SliceType = "[" "]" Type . -// PointerType = "*" Type . -// FuncType = "func" Signature . +// Type = +// BasicType | TypeName | ArrayType | SliceType | StructType | +// PointerType | FuncType | InterfaceType | MapType | ChanType | +// "(" Type ")" . // +// BasicType = ident . +// TypeName = ExportedName . +// SliceType = "[" "]" Type . +// PointerType = "*" Type . +// FuncType = "func" Signature . func (p *parser) parseType(parent *types.Package) types.Type { switch p.tok { case scanner.Ident: @@ -786,16 +816,18 @@ func (p *parser) parseType(parent *types.Package) types.Type { // ---------------------------------------------------------------------------- // Declarations -// ImportDecl = "import" PackageName PackageId . +// parseImportDecl parses an ImportDecl: // +// ImportDecl = "import" PackageName PackageId . func (p *parser) parseImportDecl() { p.expectKeyword("import") name := p.parsePackageName() p.getPkg(p.parsePackageID(), name) } -// int_lit = [ "+" | "-" ] { "0" ... "9" } . +// parseInt parses an int_lit: // +// int_lit = [ "+" | "-" ] { "0" ... "9" } . func (p *parser) parseInt() string { s := "" switch p.tok { @@ -808,8 +840,9 @@ func (p *parser) parseInt() string { return s + p.expect(scanner.Int) } -// number = int_lit [ "p" int_lit ] . +// parseNumber parses a number: // +// number = int_lit [ "p" int_lit ] . func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) { // mantissa mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0) @@ -844,13 +877,14 @@ func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) { return } -// ConstDecl = "const" ExportedName [ Type ] "=" Literal . -// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit . -// bool_lit = "true" | "false" . -// complex_lit = "(" float_lit "+" float_lit "i" ")" . -// rune_lit = "(" int_lit "+" int_lit ")" . -// string_lit = `"` { unicode_char } `"` . +// parseConstDecl parses a ConstDecl: // +// ConstDecl = "const" ExportedName [ Type ] "=" Literal . +// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit . +// bool_lit = "true" | "false" . +// complex_lit = "(" float_lit "+" float_lit "i" ")" . +// rune_lit = "(" int_lit "+" int_lit ")" . +// string_lit = `"` { unicode_char } `"` . 
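+//
+// For example, an exported untyped constant might appear in the
+// export data as:
+//
+//	const @"math".MaxInt64 = 9223372036854775807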
 func (p *parser) parseConstDecl() {
 	p.expectKeyword("const")
 	pkg, name := p.parseExportedName()
@@ -920,8 +954,9 @@ func (p *parser) parseConstDecl() {
 	pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val))
 }
 
-// TypeDecl = "type" ExportedName Type .
+// parseTypeDecl parses a TypeDecl:
 //
+//	TypeDecl = "type" ExportedName Type .
 func (p *parser) parseTypeDecl() {
 	p.expectKeyword("type")
 	pkg, name := p.parseExportedName()
@@ -939,8 +974,9 @@ func (p *parser) parseTypeDecl() {
 	}
 }
 
-// VarDecl = "var" ExportedName Type .
+// parseVarDecl parses a VarDecl:
 //
+//	VarDecl = "var" ExportedName Type .
 func (p *parser) parseVarDecl() {
 	p.expectKeyword("var")
 	pkg, name := p.parseExportedName()
@@ -948,9 +984,10 @@ func (p *parser) parseVarDecl() {
 	pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ))
 }
 
-// Func = Signature [ Body ] .
-// Body = "{" ... "}" .
+// parseFunc parses a Func:
 //
+//	Func = Signature [ Body ] .
+//	Body = "{" ... "}" .
 func (p *parser) parseFunc(recv *types.Var) *types.Signature {
 	sig := p.parseSignature(recv)
 	if p.tok == '{' {
@@ -967,9 +1004,10 @@ func (p *parser) parseFunc(recv *types.Var) *types.Signature {
 	return sig
 }
 
-// MethodDecl = "func" Receiver Name Func .
-// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" .
+// parseMethodDecl parses a MethodDecl:
 //
+//	MethodDecl = "func" Receiver Name Func .
+//	Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" .
 func (p *parser) parseMethodDecl() {
 	// "func" already consumed
 	p.expect('(')
@@ -992,8 +1030,9 @@ func (p *parser) parseMethodDecl() {
 	base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig))
 }
 
-// FuncDecl = "func" ExportedName Func .
+// parseFuncDecl parses a FuncDecl:
 //
+//	FuncDecl = "func" ExportedName Func .
 func (p *parser) parseFuncDecl() {
 	// "func" already consumed
 	pkg, name := p.parseExportedName()
@@ -1001,8 +1040,9 @@ func (p *parser) parseFuncDecl() {
 	pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ))
 }
 
-// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" .
+// parseDecl parses a Decl:
 //
+//	Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" .
 func (p *parser) parseDecl() {
 	if p.tok == scanner.Ident {
 		switch p.lit {
@@ -1029,9 +1069,10 @@ func (p *parser) parseDecl() {
 // ----------------------------------------------------------------------------
 // Export
 
-// Export = "PackageClause { Decl } "$$" .
-// PackageClause = "package" PackageName [ "safe" ] "\n" .
+// parseExport parses an Export:
 //
+//	Export = PackageClause { Decl } "$$" .
+//	PackageClause = "package" PackageName [ "safe" ] "\n" .
 func (p *parser) parseExport() *types.Package {
 	p.expectKeyword("package")
 	name := p.parsePackageName()
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go
index 209553409..9a4ff329e 100644
--- a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go
@@ -251,7 +251,10 @@ func (p *iexporter) stringOff(s string) uint64 {
 // pushDecl adds n to the declaration work queue, if not already present.
 func (p *iexporter) pushDecl(obj types.Object) {
 	// Package unsafe is known to the compiler and predeclared.
-	assert(obj.Pkg() != types.Unsafe)
+	// Caller should not ask us to export it.
+ if obj.Pkg() == types.Unsafe { + panic("cannot export package unsafe") + } if _, ok := p.declIndex[obj]; ok { return diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go index 84cfb807d..4caa0f55d 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go @@ -17,6 +17,7 @@ import ( "go/token" "go/types" "io" + "math/big" "sort" "strings" @@ -53,7 +54,7 @@ const ( ) type ident struct { - pkg string + pkg *types.Package name string } @@ -100,7 +101,9 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data if !debug { defer func() { if e := recover(); e != nil { - if version > currentVersion { + if bundle { + err = fmt.Errorf("%v", e) + } else if version > currentVersion { err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) } else { err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) @@ -461,7 +464,7 @@ func (r *importReader) obj(name string) { // To handle recursive references to the typeparam within its // bound, save the partial type in tparamIndex before reading the bounds. - id := ident{r.currPkg.Name(), name} + id := ident{r.currPkg, name} r.p.tparamIndex[id] = t var implicit bool if r.p.version >= iexportVersionGo1_18 { @@ -510,7 +513,9 @@ func (r *importReader) value() (typ types.Type, val constant.Value) { val = constant.MakeString(r.string()) case types.IsInteger: - val = r.mpint(b) + var x big.Int + r.mpint(&x, b) + val = constant.Make(&x) case types.IsFloat: val = r.mpfloat(b) @@ -559,8 +564,8 @@ func intSize(b *types.Basic) (signed bool, maxBytes uint) { return } -func (r *importReader) mpint(b *types.Basic) constant.Value { - signed, maxBytes := intSize(b) +func (r *importReader) mpint(x *big.Int, typ *types.Basic) { + signed, maxBytes := intSize(typ) maxSmall := 256 - maxBytes if signed { @@ -579,7 +584,8 @@ func (r *importReader) mpint(b *types.Basic) constant.Value { v = ^v } } - return constant.MakeInt64(v) + x.SetInt64(v) + return } v := -n @@ -589,47 +595,23 @@ func (r *importReader) mpint(b *types.Basic) constant.Value { if v < 1 || uint(v) > maxBytes { errorf("weird decoding: %v, %v => %v", n, signed, v) } - - buf := make([]byte, v) - io.ReadFull(&r.declReader, buf) - - // convert to little endian - // TODO(gri) go/constant should have a more direct conversion function - // (e.g., once it supports a big.Float based implementation) - for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 { - buf[i], buf[j] = buf[j], buf[i] - } - - x := constant.MakeFromBytes(buf) + b := make([]byte, v) + io.ReadFull(&r.declReader, b) + x.SetBytes(b) if signed && n&1 != 0 { - x = constant.UnaryOp(token.SUB, x, 0) + x.Neg(x) } - return x } -func (r *importReader) mpfloat(b *types.Basic) constant.Value { - x := r.mpint(b) - if constant.Sign(x) == 0 { - return x +func (r *importReader) mpfloat(typ *types.Basic) constant.Value { + var mant big.Int + r.mpint(&mant, typ) + var f big.Float + f.SetInt(&mant) + if f.Sign() != 0 { + f.SetMantExp(&f, int(r.int64())) } - - exp := r.int64() - switch { - case exp > 0: - x = constant.Shift(x, token.SHL, uint(exp)) - // Ensure that the imported Kind is Float, else this constant may run into - // bitsize limits on overlarge integers. Eventually we can instead adopt - // the approach of CL 288632, but that CL relies on go/constant APIs that - // were introduced in go1.13. 
- // - // TODO(rFindley): sync the logic here with tip Go once we no longer - // support go1.12. - x = constant.ToFloat(x) - case exp < 0: - d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) - x = constant.BinaryOp(x, token.QUO, d) - } - return x + return constant.Make(&f) } func (r *importReader) ident() string { @@ -777,7 +759,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { errorf("unexpected type param type") } pkg, name := r.qualifiedIdent() - id := ident{pkg.Name(), name} + id := ident{pkg, name} if t, ok := r.p.tparamIndex[id]; ok { // We're already in the process of importing this typeparam. return t diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/go/internal/gcimporter/unified_no.go new file mode 100644 index 000000000..286bf4454 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/unified_no.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !(go1.18 && goexperiment.unified) +// +build !go1.18 !goexperiment.unified + +package gcimporter + +const unifiedIR = false diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/go/internal/gcimporter/unified_yes.go new file mode 100644 index 000000000..b5d69ffbe --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/unified_yes.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 && goexperiment.unified +// +build go1.18,goexperiment.unified + +package gcimporter + +const unifiedIR = true diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_no.go b/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_no.go new file mode 100644 index 000000000..8eb20729c --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_no.go @@ -0,0 +1,19 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package gcimporter + +import ( + "fmt" + "go/token" + "go/types" +) + +func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + err = fmt.Errorf("go/tools compiled with a Go version earlier than 1.18 cannot read unified IR export data") + return +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_yes.go new file mode 100644 index 000000000..3c1a43754 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/ureader_yes.go @@ -0,0 +1,612 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Derived from go/internal/gcimporter/ureader.go + +//go:build go1.18 +// +build go1.18 + +package gcimporter + +import ( + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/go/internal/pkgbits" +) + +// A pkgReader holds the shared state for reading a unified IR package +// description. 
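+// A single pkgReader is created for each imported package (see
+// readUnifiedPackage below) and is discarded once the import is done.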
+type pkgReader struct {
+	pkgbits.PkgDecoder
+
+	fake fakeFileSet
+
+	ctxt    *types.Context
+	imports map[string]*types.Package // previously imported packages, indexed by path
+
+	// lazily initialized arrays corresponding to the unified IR
+	// PosBase, Pkg, and Type sections, respectively.
+	posBases []string // position bases (i.e., file names)
+	pkgs     []*types.Package
+	typs     []types.Type
+
+	// laterFns holds functions that need to be invoked at the end of
+	// import reading.
+	laterFns []func()
+}
+
+// later adds a function to be invoked at the end of import reading.
+func (pr *pkgReader) later(fn func()) {
+	pr.laterFns = append(pr.laterFns, fn)
+}
+
+// See cmd/compile/internal/noder.derivedInfo.
+type derivedInfo struct {
+	idx    pkgbits.Index
+	needed bool
+}
+
+// See cmd/compile/internal/noder.typeInfo.
+type typeInfo struct {
+	idx     pkgbits.Index
+	derived bool
+}
+
+func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
+	s := string(data)
+	s = s[:strings.LastIndex(s, "\n$$\n")]
+	input := pkgbits.NewPkgDecoder(path, s)
+	pkg = readUnifiedPackage(fset, nil, imports, input)
+	return
+}
+
+// readUnifiedPackage reads a package description from the given
+// unified IR export data decoder.
+func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package {
+	pr := pkgReader{
+		PkgDecoder: input,
+
+		fake: fakeFileSet{
+			fset:  fset,
+			files: make(map[string]*fileInfo),
+		},
+
+		ctxt:    ctxt,
+		imports: imports,
+
+		posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)),
+		pkgs:     make([]*types.Package, input.NumElems(pkgbits.RelocPkg)),
+		typs:     make([]types.Type, input.NumElems(pkgbits.RelocType)),
+	}
+	defer pr.fake.setLines()
+
+	r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
+	pkg := r.pkg()
+	r.Bool() // has init
+
+	for i, n := 0, r.Len(); i < n; i++ {
+		// As if r.obj(), but avoiding the Scope.Lookup call,
+		// to avoid eager loading of imports.
+		r.Sync(pkgbits.SyncObject)
+		assert(!r.Bool())
+		r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+		assert(r.Len() == 0)
+	}
+
+	r.Sync(pkgbits.SyncEOF)
+
+	for _, fn := range pr.laterFns {
+		fn()
+	}
+
+	pkg.MarkComplete()
+	return pkg
+}
+
+// A reader holds the state for reading a single unified IR element
+// within a package.
+type reader struct {
+	pkgbits.Decoder
+
+	p *pkgReader
+
+	dict *readerDict
+}
+
+// A readerDict holds the state for type parameters that parameterize
+// the current unified IR element.
+type readerDict struct {
+	// bounds is a slice of typeInfos corresponding to the underlying
+	// bounds of the element's type parameters.
+	bounds []typeInfo
+
+	// tparams is a slice of the constructed TypeParams for the element.
+	tparams []*types.TypeParam
+
+	// derived is a slice of types derived from tparams, which may be
+	// instantiated while reading the current element.
+	derived      []derivedInfo
+	derivedTypes []types.Type // lazily instantiated from derived
+}
+
+func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
+	return &reader{
+		Decoder: pr.NewDecoder(k, idx, marker),
+		p:       pr,
+	}
+}
+
+// @@@ Positions
+
+func (r *reader) pos() token.Pos {
+	r.Sync(pkgbits.SyncPos)
+	if !r.Bool() {
+		return token.NoPos
+	}
+
+	// TODO(mdempsky): Delta encoding.
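+	// Positions are encoded as a (file, line, column) triple; the
+	// file part is carried as a PosBase reference and is read first.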
+ posBase := r.posBase() + line := r.Uint() + col := r.Uint() + return r.p.fake.pos(posBase, int(line), int(col)) +} + +func (r *reader) posBase() string { + return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase)) +} + +func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string { + if b := pr.posBases[idx]; b != "" { + return b + } + + r := pr.newReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase) + + // Within types2, position bases have a lot more details (e.g., + // keeping track of where //line directives appeared exactly). + // + // For go/types, we just track the file name. + + filename := r.String() + + if r.Bool() { // file base + // Was: "b = token.NewTrimmedFileBase(filename, true)" + } else { // line base + pos := r.pos() + line := r.Uint() + col := r.Uint() + + // Was: "b = token.NewLineBase(pos, filename, true, line, col)" + _, _, _ = pos, line, col + } + + b := filename + pr.posBases[idx] = b + return b +} + +// @@@ Packages + +func (r *reader) pkg() *types.Package { + r.Sync(pkgbits.SyncPkg) + return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg)) +} + +func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { + // TODO(mdempsky): Consider using some non-nil pointer to indicate + // the universe scope, so we don't need to keep re-reading it. + if pkg := pr.pkgs[idx]; pkg != nil { + return pkg + } + + pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg() + pr.pkgs[idx] = pkg + return pkg +} + +func (r *reader) doPkg() *types.Package { + path := r.String() + switch path { + case "": + path = r.p.PkgPath() + case "builtin": + return nil // universe + case "unsafe": + return types.Unsafe + } + + if pkg := r.p.imports[path]; pkg != nil { + return pkg + } + + name := r.String() + + pkg := types.NewPackage(path, name) + r.p.imports[path] = pkg + + imports := make([]*types.Package, r.Len()) + for i := range imports { + imports[i] = r.pkg() + } + pkg.SetImports(imports) + + return pkg +} + +// @@@ Types + +func (r *reader) typ() types.Type { + return r.p.typIdx(r.typInfo(), r.dict) +} + +func (r *reader) typInfo() typeInfo { + r.Sync(pkgbits.SyncType) + if r.Bool() { + return typeInfo{idx: pkgbits.Index(r.Len()), derived: true} + } + return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false} +} + +func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type { + idx := info.idx + var where *types.Type + if info.derived { + where = &dict.derivedTypes[idx] + idx = dict.derived[idx].idx + } else { + where = &pr.typs[idx] + } + + if typ := *where; typ != nil { + return typ + } + + r := pr.newReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx) + r.dict = dict + + typ := r.doTyp() + assert(typ != nil) + + // See comment in pkgReader.typIdx explaining how this happens. 
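+	// (doTyp may have re-entered typIdx for this same index while
+	// decoding; if it already stored a type, keep that first instance.)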
+ if prev := *where; prev != nil { + return prev + } + + *where = typ + return typ +} + +func (r *reader) doTyp() (res types.Type) { + switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag { + default: + errorf("unhandled type tag: %v", tag) + panic("unreachable") + + case pkgbits.TypeBasic: + return types.Typ[r.Len()] + + case pkgbits.TypeNamed: + obj, targs := r.obj() + name := obj.(*types.TypeName) + if len(targs) != 0 { + t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false) + return t + } + return name.Type() + + case pkgbits.TypeTypeParam: + return r.dict.tparams[r.Len()] + + case pkgbits.TypeArray: + len := int64(r.Uint64()) + return types.NewArray(r.typ(), len) + case pkgbits.TypeChan: + dir := types.ChanDir(r.Len()) + return types.NewChan(dir, r.typ()) + case pkgbits.TypeMap: + return types.NewMap(r.typ(), r.typ()) + case pkgbits.TypePointer: + return types.NewPointer(r.typ()) + case pkgbits.TypeSignature: + return r.signature(nil, nil, nil) + case pkgbits.TypeSlice: + return types.NewSlice(r.typ()) + case pkgbits.TypeStruct: + return r.structType() + case pkgbits.TypeInterface: + return r.interfaceType() + case pkgbits.TypeUnion: + return r.unionType() + } +} + +func (r *reader) structType() *types.Struct { + fields := make([]*types.Var, r.Len()) + var tags []string + for i := range fields { + pos := r.pos() + pkg, name := r.selector() + ftyp := r.typ() + tag := r.String() + embedded := r.Bool() + + fields[i] = types.NewField(pos, pkg, name, ftyp, embedded) + if tag != "" { + for len(tags) < i { + tags = append(tags, "") + } + tags = append(tags, tag) + } + } + return types.NewStruct(fields, tags) +} + +func (r *reader) unionType() *types.Union { + terms := make([]*types.Term, r.Len()) + for i := range terms { + terms[i] = types.NewTerm(r.Bool(), r.typ()) + } + return types.NewUnion(terms) +} + +func (r *reader) interfaceType() *types.Interface { + methods := make([]*types.Func, r.Len()) + embeddeds := make([]types.Type, r.Len()) + implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool() + + for i := range methods { + pos := r.pos() + pkg, name := r.selector() + mtyp := r.signature(nil, nil, nil) + methods[i] = types.NewFunc(pos, pkg, name, mtyp) + } + + for i := range embeddeds { + embeddeds[i] = r.typ() + } + + iface := types.NewInterfaceType(methods, embeddeds) + if implicit { + iface.MarkImplicit() + } + return iface +} + +func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature { + r.Sync(pkgbits.SyncSignature) + + params := r.params() + results := r.params() + variadic := r.Bool() + + return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic) +} + +func (r *reader) params() *types.Tuple { + r.Sync(pkgbits.SyncParams) + + params := make([]*types.Var, r.Len()) + for i := range params { + params[i] = r.param() + } + + return types.NewTuple(params...) 
+}
+
+func (r *reader) param() *types.Var {
+	r.Sync(pkgbits.SyncParam)
+
+	pos := r.pos()
+	pkg, name := r.localIdent()
+	typ := r.typ()
+
+	return types.NewParam(pos, pkg, name, typ)
+}
+
+// @@@ Objects
+
+func (r *reader) obj() (types.Object, []types.Type) {
+	r.Sync(pkgbits.SyncObject)
+
+	assert(!r.Bool())
+
+	pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+	obj := pkgScope(pkg).Lookup(name)
+
+	targs := make([]types.Type, r.Len())
+	for i := range targs {
+		targs[i] = r.typ()
+	}
+
+	return obj, targs
+}
+
+func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
+	rname := pr.newReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
+
+	objPkg, objName := rname.qualifiedIdent()
+	assert(objName != "")
+
+	tag := pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj))
+
+	if tag == pkgbits.ObjStub {
+		assert(objPkg == nil || objPkg == types.Unsafe)
+		return objPkg, objName
+	}
+
+	if objPkg.Scope().Lookup(objName) == nil {
+		dict := pr.objDictIdx(idx)
+
+		r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1)
+		r.dict = dict
+
+		declare := func(obj types.Object) {
+			objPkg.Scope().Insert(obj)
+		}
+
+		switch tag {
+		default:
+			panic("weird")
+
+		case pkgbits.ObjAlias:
+			pos := r.pos()
+			typ := r.typ()
+			declare(types.NewTypeName(pos, objPkg, objName, typ))
+
+		case pkgbits.ObjConst:
+			pos := r.pos()
+			typ := r.typ()
+			val := r.Value()
+			declare(types.NewConst(pos, objPkg, objName, typ, val))
+
+		case pkgbits.ObjFunc:
+			pos := r.pos()
+			tparams := r.typeParamNames()
+			sig := r.signature(nil, nil, tparams)
+			declare(types.NewFunc(pos, objPkg, objName, sig))
+
+		case pkgbits.ObjType:
+			pos := r.pos()
+
+			obj := types.NewTypeName(pos, objPkg, objName, nil)
+			named := types.NewNamed(obj, nil, nil)
+			declare(obj)
+
+			named.SetTypeParams(r.typeParamNames())
+
+			// TODO(mdempsky): Rewrite receiver types to underlying is an
+			// Interface? The go/types importer does this (I think because
+			// unit tests expected that), but cmd/compile doesn't care
+			// about it, so maybe we can avoid worrying about that here.
+			rhs := r.typ()
+			r.p.later(func() {
+				underlying := rhs.Underlying()
+				named.SetUnderlying(underlying)
+			})
+
+			for i, n := 0, r.Len(); i < n; i++ {
+				named.AddMethod(r.method())
+			}
+
+		case pkgbits.ObjVar:
+			pos := r.pos()
+			typ := r.typ()
+			declare(types.NewVar(pos, objPkg, objName, typ))
+		}
+	}
+
+	return objPkg, objName
+}
+
+func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
+	r := pr.newReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
+
+	var dict readerDict
+
+	if implicits := r.Len(); implicits != 0 {
+		errorf("unexpected object with %v implicit type parameter(s)", implicits)
+	}
+
+	dict.bounds = make([]typeInfo, r.Len())
+	for i := range dict.bounds {
+		dict.bounds[i] = r.typInfo()
+	}
+
+	dict.derived = make([]derivedInfo, r.Len())
+	dict.derivedTypes = make([]types.Type, len(dict.derived))
+	for i := range dict.derived {
+		dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
+	}
+
+	// function references follow, but reader doesn't need those
+
+	return &dict
+}
+
+func (r *reader) typeParamNames() []*types.TypeParam {
+	r.Sync(pkgbits.SyncTypeParamNames)
+
+	// Note: This code assumes it only processes objects without
+	// implicit type parameters. This is currently fine, because
+	// reader is only used to read in exported declarations, which are
+	// always package scoped.
+
+	if len(r.dict.bounds) == 0 {
+		return nil
+	}
+
+	// Careful: Type parameter lists may have cycles. To allow for this,
+	// we construct the type parameter list in two passes: first we
+	// create all the TypeNames and TypeParams, then we construct and
+	// set the bound type.
+
+	r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds))
+	for i := range r.dict.bounds {
+		pos := r.pos()
+		pkg, name := r.localIdent()
+
+		tname := types.NewTypeName(pos, pkg, name, nil)
+		r.dict.tparams[i] = types.NewTypeParam(tname, nil)
+	}
+
+	typs := make([]types.Type, len(r.dict.bounds))
+	for i, bound := range r.dict.bounds {
+		typs[i] = r.p.typIdx(bound, r.dict)
+	}
+
+	// TODO(mdempsky): This is subtle, elaborate further.
+	//
+	// We have to save tparams outside of the closure, because
+	// typeParamNames() can be called multiple times with the same
+	// dictionary instance.
+	//
+	// Also, this needs to happen later to make sure SetUnderlying has
+	// been called.
+	//
+	// TODO(mdempsky): Is it safe to have a single "later" slice or do
+	// we need to have multiple passes? See comments on CL 386002 and
+	// go.dev/issue/52104.
+	tparams := r.dict.tparams
+	r.p.later(func() {
+		for i, typ := range typs {
+			tparams[i].SetConstraint(typ)
+		}
+	})
+
+	return r.dict.tparams
+}
+
+func (r *reader) method() *types.Func {
+	r.Sync(pkgbits.SyncMethod)
+	pos := r.pos()
+	pkg, name := r.selector()
+
+	rparams := r.typeParamNames()
+	sig := r.signature(r.param(), rparams, nil)
+
+	_ = r.pos() // TODO(mdempsky): Remove; this is a hack for linker.go.
+	return types.NewFunc(pos, pkg, name, sig)
+}
+
+func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) }
+func (r *reader) localIdent() (*types.Package, string)     { return r.ident(pkgbits.SyncLocalIdent) }
+func (r *reader) selector() (*types.Package, string)       { return r.ident(pkgbits.SyncSelector) }
+
+func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) {
+	r.Sync(marker)
+	return r.pkg(), r.String()
+}
+
+// pkgScope returns pkg.Scope().
+// If pkg is nil, it returns types.Universe instead.
+//
+// TODO(mdempsky): Remove after x/tools can depend on Go 1.19.
+func pkgScope(pkg *types.Package) *types.Scope {
+	if pkg != nil {
+		return pkg.Scope()
+	}
+	return types.Universe
+}
diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/codes.go b/vendor/golang.org/x/tools/go/internal/pkgbits/codes.go
new file mode 100644
index 000000000..f0cabde96
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/pkgbits/codes.go
@@ -0,0 +1,77 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+// A Code is an enum value that can be encoded into bitstreams.
+//
+// Code types are preferable for enum types, because they allow
+// Decoder to detect desyncs.
+type Code interface {
+	// Marker returns the SyncMarker for the Code's dynamic type.
+	Marker() SyncMarker
+
+	// Value returns the Code's ordinal value.
+	Value() int
+}
+
+// A CodeVal distinguishes among go/constant.Value encodings.
+type CodeVal int
+
+func (c CodeVal) Marker() SyncMarker { return SyncVal }
+func (c CodeVal) Value() int         { return int(c) }
+
+// Note: These values are public and cannot be changed without
+// updating the go/types importers.
+
+const (
+	ValBool CodeVal = iota
+	ValString
+	ValInt64
+	ValBigInt
+	ValBigRat
+	ValBigFloat
+)
+
+// A CodeType distinguishes among go/types.Type encodings.
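+//
+// For example, a slice type is encoded as TypeSlice followed by a
+// reference to its element type, and a named type as TypeNamed
+// followed by its object and any type arguments.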
+type CodeType int + +func (c CodeType) Marker() SyncMarker { return SyncType } +func (c CodeType) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + TypeBasic CodeType = iota + TypeNamed + TypePointer + TypeSlice + TypeArray + TypeChan + TypeMap + TypeSignature + TypeStruct + TypeInterface + TypeUnion + TypeTypeParam +) + +// A CodeObj distinguishes among go/types.Object encodings. +type CodeObj int + +func (c CodeObj) Marker() SyncMarker { return SyncCodeObj } +func (c CodeObj) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + ObjAlias CodeObj = iota + ObjConst + ObjType + ObjFunc + ObjVar + ObjStub +) diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/go/internal/pkgbits/decoder.go new file mode 100644 index 000000000..2bc793668 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/decoder.go @@ -0,0 +1,433 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "math/big" + "os" + "runtime" + "strings" +) + +// A PkgDecoder provides methods for decoding a package's Unified IR +// export data. +type PkgDecoder struct { + // version is the file format version. + version uint32 + + // sync indicates whether the file uses sync markers. + sync bool + + // pkgPath is the package path for the package to be decoded. + // + // TODO(mdempsky): Remove; unneeded since CL 391014. + pkgPath string + + // elemData is the full data payload of the encoded package. + // Elements are densely and contiguously packed together. + // + // The last 8 bytes of elemData are the package fingerprint. + elemData string + + // elemEnds stores the byte-offset end positions of element + // bitstreams within elemData. + // + // For example, element I's bitstream data starts at elemEnds[I-1] + // (or 0, if I==0) and ends at elemEnds[I]. + // + // Note: elemEnds is indexed by absolute indices, not + // section-relative indices. + elemEnds []uint32 + + // elemEndsEnds stores the index-offset end positions of relocation + // sections within elemEnds. + // + // For example, section K's end positions start at elemEndsEnds[K-1] + // (or 0, if K==0) and end at elemEndsEnds[K]. + elemEndsEnds [numRelocs]uint32 +} + +// PkgPath returns the package path for the package +// +// TODO(mdempsky): Remove; unneeded since CL 391014. +func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath } + +// SyncMarkers reports whether pr uses sync markers. +func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } + +// NewPkgDecoder returns a PkgDecoder initialized to read the Unified +// IR export data from input. pkgPath is the package path for the +// compilation unit that produced the export data. +// +// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. +func NewPkgDecoder(pkgPath, input string) PkgDecoder { + pr := PkgDecoder{ + pkgPath: pkgPath, + } + + // TODO(mdempsky): Implement direct indexing of input string to + // avoid copying the position information. 
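+	// The header is read back in the order it was written: the format
+	// version, an optional flags word, the cumulative per-section
+	// index ends (elemEndsEnds), then every element's end offset
+	// (elemEnds); everything that remains is elemData.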
+ + r := strings.NewReader(input) + + assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) + + switch pr.version { + default: + panic(fmt.Errorf("unsupported version: %v", pr.version)) + case 0: + // no flags + case 1: + var flags uint32 + assert(binary.Read(r, binary.LittleEndian, &flags) == nil) + pr.sync = flags&flagSyncMarkers != 0 + } + + assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil) + + pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1]) + assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil) + + pos, err := r.Seek(0, os.SEEK_CUR) + assert(err == nil) + + pr.elemData = input[pos:] + assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) + + return pr +} + +// NumElems returns the number of elements in section k. +func (pr *PkgDecoder) NumElems(k RelocKind) int { + count := int(pr.elemEndsEnds[k]) + if k > 0 { + count -= int(pr.elemEndsEnds[k-1]) + } + return count +} + +// TotalElems returns the total number of elements across all sections. +func (pr *PkgDecoder) TotalElems() int { + return len(pr.elemEnds) +} + +// Fingerprint returns the package fingerprint. +func (pr *PkgDecoder) Fingerprint() [8]byte { + var fp [8]byte + copy(fp[:], pr.elemData[len(pr.elemData)-8:]) + return fp +} + +// AbsIdx returns the absolute index for the given (section, index) +// pair. +func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { + absIdx := int(idx) + if k > 0 { + absIdx += int(pr.elemEndsEnds[k-1]) + } + if absIdx >= int(pr.elemEndsEnds[k]) { + errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) + } + return absIdx +} + +// DataIdx returns the raw element bitstream for the given (section, +// index) pair. +func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string { + absIdx := pr.AbsIdx(k, idx) + + var start uint32 + if absIdx > 0 { + start = pr.elemEnds[absIdx-1] + } + end := pr.elemEnds[absIdx] + + return pr.elemData[start:end] +} + +// StringIdx returns the string value for the given string index. +func (pr *PkgDecoder) StringIdx(idx Index) string { + return pr.DataIdx(RelocString, idx) +} + +// NewDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { + r := pr.NewDecoderRaw(k, idx) + r.Sync(marker) + return r +} + +// NewDecoderRaw returns a Decoder for the given (section, index) pair. +// +// Most callers should use NewDecoder instead. +func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder { + r := Decoder{ + common: pr, + k: k, + Idx: idx, + } + + // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved. + r.Data = *strings.NewReader(pr.DataIdx(k, idx)) + + r.Sync(SyncRelocs) + r.Relocs = make([]RelocEnt, r.Len()) + for i := range r.Relocs { + r.Sync(SyncReloc) + r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())} + } + + return r +} + +// A Decoder provides methods for decoding an individual element's +// bitstream data. +type Decoder struct { + common *PkgDecoder + + Relocs []RelocEnt + Data strings.Reader + + k RelocKind + Idx Index +} + +func (r *Decoder) checkErr(err error) { + if err != nil { + errorf("unexpected decoding error: %w", err) + } +} + +func (r *Decoder) rawUvarint() uint64 { + x, err := binary.ReadUvarint(&r.Data) + r.checkErr(err) + return x +} + +func (r *Decoder) rawVarint() int64 { + ux := r.rawUvarint() + + // Zig-zag decode. 
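+	// Zig-zag encoding maps 0, -1, 1, -2, 2, ... to 0, 1, 2, 3, 4, ...,
+	// so the low bit of ux carries the sign: shift it out, then
+	// complement the result when it was set.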
+	x := int64(ux >> 1)
+	if ux&1 != 0 {
+		x = ^x
+	}
+	return x
+}
+
+func (r *Decoder) rawReloc(k RelocKind, idx int) Index {
+	e := r.Relocs[idx]
+	assert(e.Kind == k)
+	return e.Idx
+}
+
+// Sync decodes a sync marker from the element bitstream and asserts
+// that it matches the expected marker.
+//
+// If r.common.sync is false, then Sync is a no-op.
+func (r *Decoder) Sync(mWant SyncMarker) {
+	if !r.common.sync {
+		return
+	}
+
+	pos, _ := r.Data.Seek(0, os.SEEK_CUR) // TODO(mdempsky): io.SeekCurrent after #44505 is resolved
+	mHave := SyncMarker(r.rawUvarint())
+	writerPCs := make([]int, r.rawUvarint())
+	for i := range writerPCs {
+		writerPCs[i] = int(r.rawUvarint())
+	}
+
+	if mHave == mWant {
+		return
+	}
+
+	// There's some tension here between printing:
+	//
+	// (1) full file paths that tools can recognize (e.g., so emacs
+	// hyperlinks the "file:line" text for easy navigation), or
+	//
+	// (2) short file paths that are easier for humans to read (e.g., by
+	// omitting redundant or irrelevant details, so it's easier to
+	// focus on the useful bits that remain).
+	//
+	// The current formatting favors the former, as it seems more
+	// helpful in practice. But perhaps the formatting could be improved
+	// to better address both concerns. For example, use relative file
+	// paths if they would be shorter, or rewrite file paths to contain
+	// "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
+	// to reliably expand that again.
+
+	fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos)
+
+	fmt.Printf("\nfound %v, written at:\n", mHave)
+	if len(writerPCs) == 0 {
+		fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath)
+	}
+	for _, pc := range writerPCs {
+		fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc)))
+	}
+
+	fmt.Printf("\nexpected %v, reading at:\n", mWant)
+	var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size?
+	n := runtime.Callers(2, readerPCs[:])
+	for _, pc := range fmtFrames(readerPCs[:n]...) {
+		fmt.Printf("\t%s\n", pc)
+	}
+
+	// We already printed a stack trace for the reader, so now we can
+	// simply exit. Printing a second one with panic or base.Fatalf
+	// would just be noise.
+	os.Exit(1)
+}
+
+// Bool decodes and returns a bool value from the element bitstream.
+func (r *Decoder) Bool() bool {
+	r.Sync(SyncBool)
+	x, err := r.Data.ReadByte()
+	r.checkErr(err)
+	assert(x < 2)
+	return x != 0
+}
+
+// Int64 decodes and returns an int64 value from the element bitstream.
+func (r *Decoder) Int64() int64 {
+	r.Sync(SyncInt64)
+	return r.rawVarint()
+}
+
+// Uint64 decodes and returns a uint64 value from the element bitstream.
+func (r *Decoder) Uint64() uint64 {
+	r.Sync(SyncUint64)
+	return r.rawUvarint()
+}
+
+// Len decodes and returns a non-negative int value from the element bitstream.
+func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v }
+
+// Int decodes and returns an int value from the element bitstream.
+func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v }
+
+// Uint decodes and returns a uint value from the element bitstream.
+func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v }
+
+// Code decodes a Code value from the element bitstream and returns
+// its ordinal value. It's the caller's responsibility to convert the
+// result to an appropriate Code type.
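+// For example:
+//
+//	tag := CodeType(r.Code(SyncType))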
+// +// TODO(mdempsky): Ideally this method would have signature "Code[T +// Code] T" instead, but we don't allow generic methods and the +// compiler can't depend on generics yet anyway. +func (r *Decoder) Code(mark SyncMarker) int { + r.Sync(mark) + return r.Len() +} + +// Reloc decodes a relocation of expected section k from the element +// bitstream and returns an index to the referenced element. +func (r *Decoder) Reloc(k RelocKind) Index { + r.Sync(SyncUseReloc) + return r.rawReloc(k, r.Len()) +} + +// String decodes and returns a string value from the element +// bitstream. +func (r *Decoder) String() string { + r.Sync(SyncString) + return r.common.StringIdx(r.Reloc(RelocString)) +} + +// Strings decodes and returns a variable-length slice of strings from +// the element bitstream. +func (r *Decoder) Strings() []string { + res := make([]string, r.Len()) + for i := range res { + res[i] = r.String() + } + return res +} + +// Value decodes and returns a constant.Value from the element +// bitstream. +func (r *Decoder) Value() constant.Value { + r.Sync(SyncValue) + isComplex := r.Bool() + val := r.scalar() + if isComplex { + val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar())) + } + return val +} + +func (r *Decoder) scalar() constant.Value { + switch tag := CodeVal(r.Code(SyncVal)); tag { + default: + panic(fmt.Errorf("unexpected scalar tag: %v", tag)) + + case ValBool: + return constant.MakeBool(r.Bool()) + case ValString: + return constant.MakeString(r.String()) + case ValInt64: + return constant.MakeInt64(r.Int64()) + case ValBigInt: + return constant.Make(r.bigInt()) + case ValBigRat: + num := r.bigInt() + denom := r.bigInt() + return constant.Make(new(big.Rat).SetFrac(num, denom)) + case ValBigFloat: + return constant.Make(r.bigFloat()) + } +} + +func (r *Decoder) bigInt() *big.Int { + v := new(big.Int).SetBytes([]byte(r.String())) + if r.Bool() { + v.Neg(v) + } + return v +} + +func (r *Decoder) bigFloat() *big.Float { + v := new(big.Float).SetPrec(512) + assert(v.UnmarshalText([]byte(r.String())) == nil) + return v +} + +// @@@ Helpers + +// TODO(mdempsky): These should probably be removed. I think they're a +// smell that the export data format is not yet quite right. + +// PeekPkgPath returns the package path for the specified package +// index. +func (pr *PkgDecoder) PeekPkgPath(idx Index) string { + r := pr.NewDecoder(RelocPkg, idx, SyncPkgDef) + path := r.String() + if path == "" { + path = pr.pkgPath + } + return path +} + +// PeekObj returns the package path, object name, and CodeObj for the +// specified object index. +func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { + r := pr.NewDecoder(RelocName, idx, SyncObject1) + r.Sync(SyncSym) + r.Sync(SyncPkg) + path := pr.PeekPkgPath(r.Reloc(RelocPkg)) + name := r.String() + assert(name != "") + + tag := CodeObj(r.Code(SyncCodeObj)) + + return path, name, tag +} diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/doc.go b/vendor/golang.org/x/tools/go/internal/pkgbits/doc.go new file mode 100644 index 000000000..c8a2796b5 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/doc.go @@ -0,0 +1,32 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkgbits implements low-level coding abstractions for +// Unified IR's export data format. +// +// At a low-level, a package is a collection of bitstream elements. 
+// Each element has a "kind" and a dense, non-negative index. +// Elements can be randomly accessed given their kind and index. +// +// Individual elements are sequences of variable-length values (e.g., +// integers, booleans, strings, go/constant values, cross-references +// to other elements). Package pkgbits provides APIs for encoding and +// decoding these low-level values, but the details of mapping +// higher-level Go constructs into elements is left to higher-level +// abstractions. +// +// Elements may cross-reference each other with "relocations." For +// example, an element representing a pointer type has a relocation +// referring to the element type. +// +// Go constructs may be composed as a constellation of multiple +// elements. For example, a declared function may have one element to +// describe the object (e.g., its name, type, position), and a +// separate element to describe its function body. This allows readers +// some flexibility in efficiently seeking or re-reading data (e.g., +// inlining requires re-reading the function body for each inlined +// call, without needing to re-read the object-level details). +// +// This is a copy of internal/pkgbits in the Go implementation. +package pkgbits diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/go/internal/pkgbits/encoder.go new file mode 100644 index 000000000..c50c838ca --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/encoder.go @@ -0,0 +1,379 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "bytes" + "crypto/md5" + "encoding/binary" + "go/constant" + "io" + "math/big" + "runtime" +) + +// currentVersion is the current version number. +// +// - v0: initial prototype +// +// - v1: adds the flags uint32 word +const currentVersion uint32 = 1 + +// A PkgEncoder provides methods for encoding a package's Unified IR +// export data. +type PkgEncoder struct { + // elems holds the bitstream for previously encoded elements. + elems [numRelocs][]string + + // stringsIdx maps previously encoded strings to their index within + // the RelocString section, to allow deduplication. That is, + // elems[RelocString][stringsIdx[s]] == s (if present). + stringsIdx map[string]Index + + // syncFrames is the number of frames to write at each sync + // marker. A negative value means sync markers are omitted. + syncFrames int +} + +// SyncMarkers reports whether pw uses sync markers. +func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } + +// NewPkgEncoder returns an initialized PkgEncoder. +// +// syncFrames is the number of caller frames that should be serialized +// at Sync points. Serializing additional frames results in larger +// export data files, but can help diagnosing desync errors in +// higher-level Unified IR reader/writer code. If syncFrames is +// negative, then sync markers are omitted entirely. +func NewPkgEncoder(syncFrames int) PkgEncoder { + return PkgEncoder{ + stringsIdx: make(map[string]Index), + syncFrames: syncFrames, + } +} + +// DumpTo writes the package's encoded data to out0 and returns the +// package fingerprint. 
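+//
+// The layout written here mirrors what PkgDecoder expects: the
+// version word, the flags word, cumulative per-section element
+// counts (elemEndsEnds), cumulative element end offsets (elemEnds),
+// the raw element data, and finally the 8-byte fingerprint (a
+// truncated MD5 of everything before it), which is appended to out0
+// without being hashed itself.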
+func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
+ h := md5.New()
+ out := io.MultiWriter(out0, h)
+
+ writeUint32 := func(x uint32) {
+ assert(binary.Write(out, binary.LittleEndian, x) == nil)
+ }
+
+ writeUint32(currentVersion)
+
+ var flags uint32
+ if pw.SyncMarkers() {
+ flags |= flagSyncMarkers
+ }
+ writeUint32(flags)
+
+ // Write elemEndsEnds.
+ var sum uint32
+ for _, elems := range &pw.elems {
+ sum += uint32(len(elems))
+ writeUint32(sum)
+ }
+
+ // Write elemEnds.
+ sum = 0
+ for _, elems := range &pw.elems {
+ for _, elem := range elems {
+ sum += uint32(len(elem))
+ writeUint32(sum)
+ }
+ }
+
+ // Write elemData.
+ for _, elems := range &pw.elems {
+ for _, elem := range elems {
+ _, err := io.WriteString(out, elem)
+ assert(err == nil)
+ }
+ }
+
+ // Write fingerprint.
+ copy(fingerprint[:], h.Sum(nil))
+ _, err := out0.Write(fingerprint[:])
+ assert(err == nil)
+
+ return
+}
+
+// StringIdx adds a string value to the strings section, if not
+// already present, and returns its index.
+func (pw *PkgEncoder) StringIdx(s string) Index {
+ if idx, ok := pw.stringsIdx[s]; ok {
+ assert(pw.elems[RelocString][idx] == s)
+ return idx
+ }
+
+ idx := Index(len(pw.elems[RelocString]))
+ pw.elems[RelocString] = append(pw.elems[RelocString], s)
+ pw.stringsIdx[s] = idx
+ return idx
+}
+
+// NewEncoder returns an Encoder for a new element within the given
+// section, and encodes the given SyncMarker as the start of the
+// element bitstream.
+func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder {
+ e := pw.NewEncoderRaw(k)
+ e.Sync(marker)
+ return e
+}
+
+// NewEncoderRaw returns an Encoder for a new element within the given
+// section.
+//
+// Most callers should use NewEncoder instead.
+func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder {
+ idx := Index(len(pw.elems[k]))
+ pw.elems[k] = append(pw.elems[k], "") // placeholder
+
+ return Encoder{
+ p: pw,
+ k: k,
+ Idx: idx,
+ }
+}
+
+// An Encoder provides methods for encoding an individual element's
+// bitstream data.
+type Encoder struct {
+ p *PkgEncoder
+
+ Relocs []RelocEnt
+ Data bytes.Buffer // accumulated element bitstream data
+
+ encodingRelocHeader bool
+
+ k RelocKind
+ Idx Index // index within relocation section
+}
+
+// Flush finalizes the element's bitstream and returns its Index.
+func (w *Encoder) Flush() Index {
+ var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
+
+ // Back up the data so we write the relocations at the front.
+ var tmp bytes.Buffer
+ io.Copy(&tmp, &w.Data)
+
+ // TODO(mdempsky): Consider writing these out separately so they're
+ // easier to strip, along with function bodies, so that we can prune
+ // down to just the data that's relevant to go/types.
+ if w.encodingRelocHeader {
+ panic("encodingRelocHeader already true; recursive flush?")
+ }
+ w.encodingRelocHeader = true
+ w.Sync(SyncRelocs)
+ w.Len(len(w.Relocs))
+ for _, rEnt := range w.Relocs {
+ w.Sync(SyncReloc)
+ w.Len(int(rEnt.Kind))
+ w.Len(int(rEnt.Idx))
+ }
+
+ io.Copy(&sb, &w.Data)
+ io.Copy(&sb, &tmp)
+ w.p.elems[w.k][w.Idx] = sb.String()
+
+ return w.Idx
+}
+
+func (w *Encoder) checkErr(err error) {
+ if err != nil {
+ errorf("unexpected encoding error: %v", err)
+ }
+}
+
+func (w *Encoder) rawUvarint(x uint64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], x)
+ _, err := w.Data.Write(buf[:n])
+ w.checkErr(err)
+}
+
+func (w *Encoder) rawVarint(x int64) {
+ // Zig-zag encode.
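+ // Zig-zag interleaves non-negative and negative values (0 -> 0,
+ // -1 -> 1, 1 -> 2, -2 -> 3, ...) so small magnitudes of either sign
+ // become small unsigned values: shift left one bit, then complement
+ // if x is negative. Decoder.rawVarint inverts this transform.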
+ ux := uint64(x) << 1
+ if x < 0 {
+ ux = ^ux
+ }
+
+ w.rawUvarint(ux)
+}
+
+func (w *Encoder) rawReloc(r RelocKind, idx Index) int {
+ // TODO(mdempsky): Use map for lookup; this takes quadratic time.
+ for i, rEnt := range w.Relocs {
+ if rEnt.Kind == r && rEnt.Idx == idx {
+ return i
+ }
+ }
+
+ i := len(w.Relocs)
+ w.Relocs = append(w.Relocs, RelocEnt{r, idx})
+ return i
+}
+
+func (w *Encoder) Sync(m SyncMarker) {
+ if !w.p.SyncMarkers() {
+ return
+ }
+
+ // Writing out stack frame string references requires working
+ // relocations, but writing out the relocations themselves involves
+ // sync markers. To prevent infinite recursion, we simply trim the
+ // stack frame for sync markers within the relocation header.
+ var frames []string
+ if !w.encodingRelocHeader && w.p.syncFrames > 0 {
+ pcs := make([]uintptr, w.p.syncFrames)
+ n := runtime.Callers(2, pcs)
+ frames = fmtFrames(pcs[:n]...)
+ }
+
+ // TODO(mdempsky): Save space by writing out stack frames as a
+ // linked list so we can share common stack frames.
+ w.rawUvarint(uint64(m))
+ w.rawUvarint(uint64(len(frames)))
+ for _, frame := range frames {
+ w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame))))
+ }
+}
+
+// Bool encodes and writes a bool value into the element bitstream,
+// and then returns the bool value.
+//
+// For simple, 2-alternative encodings, the idiomatic way to call Bool
+// is something like:
+//
+// if w.Bool(x != 0) {
+// // alternative #1
+// } else {
+// // alternative #2
+// }
+//
+// For multi-alternative encodings, use Code instead.
+func (w *Encoder) Bool(b bool) bool {
+ w.Sync(SyncBool)
+ var x byte
+ if b {
+ x = 1
+ }
+ err := w.Data.WriteByte(x)
+ w.checkErr(err)
+ return b
+}
+
+// Int64 encodes and writes an int64 value into the element bitstream.
+func (w *Encoder) Int64(x int64) {
+ w.Sync(SyncInt64)
+ w.rawVarint(x)
+}
+
+// Uint64 encodes and writes a uint64 value into the element bitstream.
+func (w *Encoder) Uint64(x uint64) {
+ w.Sync(SyncUint64)
+ w.rawUvarint(x)
+}
+
+// Len encodes and writes a non-negative int value into the element bitstream.
+func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) }
+
+// Int encodes and writes an int value into the element bitstream.
+func (w *Encoder) Int(x int) { w.Int64(int64(x)) }
+
+// Uint encodes and writes a uint value into the element bitstream.
+func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) }
+
+// Reloc encodes and writes a relocation for the given (section,
+// index) pair into the element bitstream.
+//
+// Note: Only the index is formally written into the element
+// bitstream, so bitstream decoders must know from context which
+// section an encoded relocation refers to.
+func (w *Encoder) Reloc(r RelocKind, idx Index) {
+ w.Sync(SyncUseReloc)
+ w.Len(w.rawReloc(r, idx))
+}
+
+// Code encodes and writes a Code value into the element bitstream.
+func (w *Encoder) Code(c Code) {
+ w.Sync(c.Marker())
+ w.Len(c.Value())
+}
+
+// String encodes and writes a string value into the element
+// bitstream.
+//
+// Internally, strings are deduplicated by adding them to the strings
+// section (if not already present), and then writing a relocation
+// into the element bitstream.
+func (w *Encoder) String(s string) {
+ w.Sync(SyncString)
+ w.Reloc(RelocString, w.p.StringIdx(s))
+}
+
+// Strings encodes and writes a variable-length slice of strings into
+// the element bitstream.
+func (w *Encoder) Strings(ss []string) { + w.Len(len(ss)) + for _, s := range ss { + w.String(s) + } +} + +// Value encodes and writes a constant.Value into the element +// bitstream. +func (w *Encoder) Value(val constant.Value) { + w.Sync(SyncValue) + if w.Bool(val.Kind() == constant.Complex) { + w.scalar(constant.Real(val)) + w.scalar(constant.Imag(val)) + } else { + w.scalar(val) + } +} + +func (w *Encoder) scalar(val constant.Value) { + switch v := constant.Val(val).(type) { + default: + errorf("unhandled %v (%v)", val, val.Kind()) + case bool: + w.Code(ValBool) + w.Bool(v) + case string: + w.Code(ValString) + w.String(v) + case int64: + w.Code(ValInt64) + w.Int64(v) + case *big.Int: + w.Code(ValBigInt) + w.bigInt(v) + case *big.Rat: + w.Code(ValBigRat) + w.bigInt(v.Num()) + w.bigInt(v.Denom()) + case *big.Float: + w.Code(ValBigFloat) + w.bigFloat(v) + } +} + +func (w *Encoder) bigInt(v *big.Int) { + b := v.Bytes() + w.String(string(b)) // TODO: More efficient encoding. + w.Bool(v.Sign() < 0) +} + +func (w *Encoder) bigFloat(v *big.Float) { + b := v.Append(nil, 'p', -1) + w.String(string(b)) // TODO: More efficient encoding. +} diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/flags.go b/vendor/golang.org/x/tools/go/internal/pkgbits/flags.go new file mode 100644 index 000000000..654222745 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/flags.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +const ( + flagSyncMarkers = 1 << iota // file format contains sync markers +) diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go1.go new file mode 100644 index 000000000..5294f6a63 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go1.go @@ -0,0 +1,21 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.7 +// +build !go1.7 + +// TODO(mdempsky): Remove after #44505 is resolved + +package pkgbits + +import "runtime" + +func walkFrames(pcs []uintptr, visit frameVisitor) { + for _, pc := range pcs { + fn := runtime.FuncForPC(pc) + file, line := fn.FileLine(pc) + + visit(file, line, fn.Name(), pc-fn.Entry()) + } +} diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go17.go new file mode 100644 index 000000000..2324ae7ad --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/frames_go17.go @@ -0,0 +1,28 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.7 +// +build go1.7 + +package pkgbits + +import "runtime" + +// walkFrames calls visit for each call frame represented by pcs. +// +// pcs should be a slice of PCs, as returned by runtime.Callers. 
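+//
+// Unlike the runtime.FuncForPC fallback in frames_go1.go, the
+// runtime.CallersFrames API used here expands inlined calls into
+// separate frames.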
+func walkFrames(pcs []uintptr, visit frameVisitor) { + if len(pcs) == 0 { + return + } + + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) + if !more { + return + } + } +} diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/reloc.go b/vendor/golang.org/x/tools/go/internal/pkgbits/reloc.go new file mode 100644 index 000000000..7a8f04ab3 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/reloc.go @@ -0,0 +1,42 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A RelocKind indicates a particular section within a unified IR export. +type RelocKind int + +// An Index represents a bitstream element index within a particular +// section. +type Index int + +// A relocEnt (relocation entry) is an entry in an element's local +// reference table. +// +// TODO(mdempsky): Rename this too. +type RelocEnt struct { + Kind RelocKind + Idx Index +} + +// Reserved indices within the meta relocation section. +const ( + PublicRootIdx Index = 0 + PrivateRootIdx Index = 1 +) + +const ( + RelocString RelocKind = iota + RelocMeta + RelocPosBase + RelocPkg + RelocName + RelocType + RelocObj + RelocObjExt + RelocObjDict + RelocBody + + numRelocs = iota +) diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/support.go b/vendor/golang.org/x/tools/go/internal/pkgbits/support.go new file mode 100644 index 000000000..ad26d3b28 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/support.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import "fmt" + +func assert(b bool) { + if !b { + panic("assertion failed") + } +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Errorf(format, args...)) +} diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/go/internal/pkgbits/sync.go new file mode 100644 index 000000000..5bd51ef71 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/sync.go @@ -0,0 +1,113 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "fmt" + "strings" +) + +// fmtFrames formats a backtrace for reporting reader/writer desyncs. +func fmtFrames(pcs ...uintptr) []string { + res := make([]string, 0, len(pcs)) + walkFrames(pcs, func(file string, line int, name string, offset uintptr) { + // Trim package from function name. It's just redundant noise. + name = strings.TrimPrefix(name, "cmd/compile/internal/noder.") + + res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset)) + }) + return res +} + +type frameVisitor func(file string, line int, name string, offset uintptr) + +// SyncMarker is an enum type that represents markers that may be +// written to export data to ensure the reader and writer stay +// synchronized. +type SyncMarker int + +//go:generate stringer -type=SyncMarker -trimprefix=Sync + +const ( + _ SyncMarker = iota + + // Public markers (known to go/types importers). + + // Low-level coding markers. 
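+ // Most low-level markers pair an Encoder method with its Decoder
+ // counterpart; e.g., Encoder.Bool writes SyncBool immediately
+ // before the value and Decoder.Bool checks for it.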
+ SyncEOF + SyncBool + SyncInt64 + SyncUint64 + SyncString + SyncValue + SyncVal + SyncRelocs + SyncReloc + SyncUseReloc + + // Higher-level object and type markers. + SyncPublic + SyncPos + SyncPosBase + SyncObject + SyncObject1 + SyncPkg + SyncPkgDef + SyncMethod + SyncType + SyncTypeIdx + SyncTypeParamNames + SyncSignature + SyncParams + SyncParam + SyncCodeObj + SyncSym + SyncLocalIdent + SyncSelector + + // Private markers (only known to cmd/compile). + SyncPrivate + + SyncFuncExt + SyncVarExt + SyncTypeExt + SyncPragma + + SyncExprList + SyncExprs + SyncExpr + SyncExprType + SyncAssign + SyncOp + SyncFuncLit + SyncCompLit + + SyncDecl + SyncFuncBody + SyncOpenScope + SyncCloseScope + SyncCloseAnotherScope + SyncDeclNames + SyncDeclName + + SyncStmts + SyncBlockStmt + SyncIfStmt + SyncForStmt + SyncSwitchStmt + SyncRangeStmt + SyncCaseClause + SyncCommClause + SyncSelectStmt + SyncDecls + SyncLabeledStmt + SyncUseObjLocal + SyncAddLocal + SyncLinkname + SyncStmt1 + SyncStmtsEnd + SyncLabel + SyncOptLabel +) diff --git a/vendor/golang.org/x/tools/go/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/go/internal/pkgbits/syncmarker_string.go new file mode 100644 index 000000000..4a5b0ca5f --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/pkgbits/syncmarker_string.go @@ -0,0 +1,89 @@ +// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT. + +package pkgbits + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[SyncEOF-1] + _ = x[SyncBool-2] + _ = x[SyncInt64-3] + _ = x[SyncUint64-4] + _ = x[SyncString-5] + _ = x[SyncValue-6] + _ = x[SyncVal-7] + _ = x[SyncRelocs-8] + _ = x[SyncReloc-9] + _ = x[SyncUseReloc-10] + _ = x[SyncPublic-11] + _ = x[SyncPos-12] + _ = x[SyncPosBase-13] + _ = x[SyncObject-14] + _ = x[SyncObject1-15] + _ = x[SyncPkg-16] + _ = x[SyncPkgDef-17] + _ = x[SyncMethod-18] + _ = x[SyncType-19] + _ = x[SyncTypeIdx-20] + _ = x[SyncTypeParamNames-21] + _ = x[SyncSignature-22] + _ = x[SyncParams-23] + _ = x[SyncParam-24] + _ = x[SyncCodeObj-25] + _ = x[SyncSym-26] + _ = x[SyncLocalIdent-27] + _ = x[SyncSelector-28] + _ = x[SyncPrivate-29] + _ = x[SyncFuncExt-30] + _ = x[SyncVarExt-31] + _ = x[SyncTypeExt-32] + _ = x[SyncPragma-33] + _ = x[SyncExprList-34] + _ = x[SyncExprs-35] + _ = x[SyncExpr-36] + _ = x[SyncExprType-37] + _ = x[SyncAssign-38] + _ = x[SyncOp-39] + _ = x[SyncFuncLit-40] + _ = x[SyncCompLit-41] + _ = x[SyncDecl-42] + _ = x[SyncFuncBody-43] + _ = x[SyncOpenScope-44] + _ = x[SyncCloseScope-45] + _ = x[SyncCloseAnotherScope-46] + _ = x[SyncDeclNames-47] + _ = x[SyncDeclName-48] + _ = x[SyncStmts-49] + _ = x[SyncBlockStmt-50] + _ = x[SyncIfStmt-51] + _ = x[SyncForStmt-52] + _ = x[SyncSwitchStmt-53] + _ = x[SyncRangeStmt-54] + _ = x[SyncCaseClause-55] + _ = x[SyncCommClause-56] + _ = x[SyncSelectStmt-57] + _ = x[SyncDecls-58] + _ = x[SyncLabeledStmt-59] + _ = x[SyncUseObjLocal-60] + _ = x[SyncAddLocal-61] + _ = x[SyncLinkname-62] + _ = x[SyncStmt1-63] + _ = x[SyncStmtsEnd-64] + _ = x[SyncLabel-65] + _ = x[SyncOptLabel-66] +} + +const _SyncMarker_name = 
"EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" + +var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458} + +func (i SyncMarker) String() string { + i -= 1 + if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) { + return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]] +} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index 4bfe28a51..da4ab89fe 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -67,7 +67,6 @@ Most tools should pass their command-line arguments (after any flags) uninterpreted to the loader, so that the loader can interpret them according to the conventions of the underlying build system. See the Example function for typical usage. - */ package packages // import "golang.org/x/tools/go/packages" diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 0e1e7f11f..de881562d 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -26,7 +26,6 @@ import ( "golang.org/x/tools/go/internal/packagesdriver" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" - "golang.org/x/xerrors" ) // debug controls verbose logging. @@ -303,11 +302,12 @@ func (state *golistState) runContainsQueries(response *responseDeduper, queries } dirResponse, err := state.createDriverResponse(pattern) - // If there was an error loading the package, or the package is returned - // with errors, try to load the file as an ad-hoc package. + // If there was an error loading the package, or no packages are returned, + // or the package is returned with errors, try to load the file as an + // ad-hoc package. // Usually the error will appear in a returned package, but may not if we're // in module mode and the ad-hoc is located outside a module. - if err != nil || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && + if err != nil || len(dirResponse.Packages) == 0 || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && len(dirResponse.Packages[0].Errors) == 1 { var queryErr error if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil { @@ -393,6 +393,8 @@ type jsonPackage struct { CompiledGoFiles []string IgnoredGoFiles []string IgnoredOtherFiles []string + EmbedPatterns []string + EmbedFiles []string CFiles []string CgoFiles []string CXXFiles []string @@ -444,7 +446,11 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse // Run "go list" for complete // information on the specified packages. - buf, err := state.invokeGo("list", golistargs(state.cfg, words)...) 
+ goVersion, err := state.getGoVersion() + if err != nil { + return nil, err + } + buf, err := state.invokeGo("list", golistargs(state.cfg, words, goVersion)...) if err != nil { return nil, err } @@ -565,6 +571,8 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), + EmbedFiles: absJoin(p.Dir, p.EmbedFiles), + EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns), IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), forTest: p.ForTest, depsErrors: p.DepsErrors, @@ -805,17 +813,83 @@ func absJoin(dir string, fileses ...[]string) (res []string) { return res } -func golistargs(cfg *Config, words []string) []string { +func jsonFlag(cfg *Config, goVersion int) string { + if goVersion < 19 { + return "-json" + } + var fields []string + added := make(map[string]bool) + addFields := func(fs ...string) { + for _, f := range fs { + if !added[f] { + added[f] = true + fields = append(fields, f) + } + } + } + addFields("Name", "ImportPath", "Error") // These fields are always needed + if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 { + addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles", + "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles", + "SwigFiles", "SwigCXXFiles", "SysoFiles") + if cfg.Tests { + addFields("TestGoFiles", "XTestGoFiles") + } + } + if cfg.Mode&NeedTypes != 0 { + // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax, + // even when -compiled isn't passed in. + // TODO(#52435): Should we make the test ask for -compiled, or automatically + // request CompiledGoFiles in certain circumstances? + addFields("Dir", "CompiledGoFiles") + } + if cfg.Mode&NeedCompiledGoFiles != 0 { + addFields("Dir", "CompiledGoFiles", "Export") + } + if cfg.Mode&NeedImports != 0 { + // When imports are requested, DepOnly is used to distinguish between packages + // explicitly requested and transitive imports of those packages. + addFields("DepOnly", "Imports", "ImportMap") + if cfg.Tests { + addFields("TestImports", "XTestImports") + } + } + if cfg.Mode&NeedDeps != 0 { + addFields("DepOnly") + } + if usesExportData(cfg) { + // Request Dir in the unlikely case Export is not absolute. + addFields("Dir", "Export") + } + if cfg.Mode&needInternalForTest != 0 { + addFields("ForTest") + } + if cfg.Mode&needInternalDepsErrors != 0 { + addFields("DepsErrors") + } + if cfg.Mode&NeedModule != 0 { + addFields("Module") + } + if cfg.Mode&NeedEmbedFiles != 0 { + addFields("EmbedFiles") + } + if cfg.Mode&NeedEmbedPatterns != 0 { + addFields("EmbedPatterns") + } + return "-json=" + strings.Join(fields, ",") +} + +func golistargs(cfg *Config, words []string, goVersion int) []string { const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo fullargs := []string{ - "-e", "-json", + "-e", jsonFlag(cfg, goVersion), fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0), fmt.Sprintf("-test=%t", cfg.Tests), fmt.Sprintf("-export=%t", usesExportData(cfg)), fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), // go list doesn't let you pass -test and -find together, // probably because you'd just get the TestMain. - fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0), + fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), } fullargs = append(fullargs, cfg.BuildFlags...) 
fullargs = append(fullargs, "--") @@ -879,7 +953,7 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, if !ok { // Catastrophic error: // - context cancellation - return nil, xerrors.Errorf("couldn't run 'go': %w", err) + return nil, fmt.Errorf("couldn't run 'go': %w", err) } // Old go version? diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go index 7ea37e7ee..5c080d21b 100644 --- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -15,7 +15,7 @@ var allModes = []LoadMode{ NeedCompiledGoFiles, NeedImports, NeedDeps, - NeedExportsFile, + NeedExportFile, NeedTypes, NeedSyntax, NeedTypesInfo, @@ -28,7 +28,7 @@ var modeStrings = []string{ "NeedCompiledGoFiles", "NeedImports", "NeedDeps", - "NeedExportsFile", + "NeedExportFile", "NeedTypes", "NeedSyntax", "NeedTypesInfo", diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 1b5424e78..a93dc6add 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -39,9 +39,6 @@ import ( // Load may return more information than requested. type LoadMode int -// TODO(matloob): When a V2 of go/packages is released, rename NeedExportsFile to -// NeedExportFile to make it consistent with the Package field it's adding. - const ( // NeedName adds Name and PkgPath. NeedName LoadMode = 1 << iota @@ -59,8 +56,8 @@ const ( // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. NeedDeps - // NeedExportsFile adds ExportFile. - NeedExportsFile + // NeedExportFile adds ExportFile. + NeedExportFile // NeedTypes adds Types, Fset, and IllTyped. NeedTypes @@ -74,12 +71,25 @@ const ( // NeedTypesSizes adds TypesSizes. NeedTypesSizes + // needInternalDepsErrors adds the internal deps errors field for use by gopls. + needInternalDepsErrors + + // needInternalForTest adds the internal forTest field. + // Tests must also be set on the context for this field to be populated. + needInternalForTest + // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. // Modifies CompiledGoFiles and Types, and has no effect on its own. typecheckCgo // NeedModule adds Module. NeedModule + + // NeedEmbedFiles adds EmbedFiles. + NeedEmbedFiles + + // NeedEmbedPatterns adds EmbedPatterns. + NeedEmbedPatterns ) const ( @@ -102,6 +112,9 @@ const ( // Deprecated: LoadAllSyntax exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadAllSyntax = LoadSyntax | NeedDeps + + // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. + NeedExportsFile = NeedExportFile ) // A Config specifies details about how packages should be loaded. @@ -296,6 +309,14 @@ type Package struct { // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. OtherFiles []string + // EmbedFiles lists the absolute file paths of the package's files + // embedded with go:embed. + EmbedFiles []string + + // EmbedPatterns lists the absolute file patterns of the package's + // files embedded with go:embed. + EmbedPatterns []string + // IgnoredFiles lists source files that are not part of the package // using the current build configuration but that might be part of // the package using other build configurations. 
@@ -389,6 +410,8 @@ func init() { config.(*Config).modFlag = value } packagesinternal.TypecheckCgo = int(typecheckCgo) + packagesinternal.DepsErrors = int(needInternalDepsErrors) + packagesinternal.ForTest = int(needInternalForTest) } // An Error describes a problem with a package's metadata, syntax, or types. @@ -431,6 +454,8 @@ type flatPackage struct { GoFiles []string `json:",omitempty"` CompiledGoFiles []string `json:",omitempty"` OtherFiles []string `json:",omitempty"` + EmbedFiles []string `json:",omitempty"` + EmbedPatterns []string `json:",omitempty"` IgnoredFiles []string `json:",omitempty"` ExportFile string `json:",omitempty"` Imports map[string]string `json:",omitempty"` @@ -454,6 +479,8 @@ func (p *Package) MarshalJSON() ([]byte, error) { GoFiles: p.GoFiles, CompiledGoFiles: p.CompiledGoFiles, OtherFiles: p.OtherFiles, + EmbedFiles: p.EmbedFiles, + EmbedPatterns: p.EmbedPatterns, IgnoredFiles: p.IgnoredFiles, ExportFile: p.ExportFile, } @@ -481,6 +508,8 @@ func (p *Package) UnmarshalJSON(b []byte) error { GoFiles: flat.GoFiles, CompiledGoFiles: flat.CompiledGoFiles, OtherFiles: flat.OtherFiles, + EmbedFiles: flat.EmbedFiles, + EmbedPatterns: flat.EmbedPatterns, ExportFile: flat.ExportFile, } if len(flat.Imports) > 0 { @@ -614,7 +643,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || // ... or if we need types and the exportData is invalid. We fall back to (incompletely) // typechecking packages from source if they fail to compile. - (ld.Mode&NeedTypes|NeedTypesInfo != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe" + (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe" lpkg := &loaderPackage{ Package: pkg, needtypes: needtypes, @@ -752,13 +781,19 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { ld.pkgs[i].OtherFiles = nil ld.pkgs[i].IgnoredFiles = nil } + if ld.requestedMode&NeedEmbedFiles == 0 { + ld.pkgs[i].EmbedFiles = nil + } + if ld.requestedMode&NeedEmbedPatterns == 0 { + ld.pkgs[i].EmbedPatterns = nil + } if ld.requestedMode&NeedCompiledGoFiles == 0 { ld.pkgs[i].CompiledGoFiles = nil } if ld.requestedMode&NeedImports == 0 { ld.pkgs[i].Imports = nil } - if ld.requestedMode&NeedExportsFile == 0 { + if ld.requestedMode&NeedExportFile == 0 { ld.pkgs[i].ExportFile = "" } if ld.requestedMode&NeedTypes == 0 { @@ -1053,7 +1088,6 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { // // Because files are scanned in parallel, the token.Pos // positions of the resulting ast.Files are not ordered. -// func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { var wg sync.WaitGroup n := len(filenames) @@ -1097,7 +1131,6 @@ func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { // sameFile returns true if x and y have the same basename and denote // the same file. -// func sameFile(x, y string) bool { if x == y { // It could be the case that y doesn't exist. @@ -1210,8 +1243,13 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error if err != nil { return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) } + if _, ok := view["go.shape"]; ok { + // Account for the pseudopackage "go.shape" that gets + // created by generic code. 
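+ // Bumping viewLen keeps the sanity check below from reporting
+ // "go.shape" as an unexpected new package.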
+ viewLen++ + } if viewLen != len(view) { - log.Fatalf("Unexpected package creation during export data loading") + log.Panicf("golang.org/x/tools/go/packages: unexpected new packages during load of %s", lpkg.PkgPath) } lpkg.Types = tpkg @@ -1222,17 +1260,8 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error // impliedLoadMode returns loadMode with its dependencies. func impliedLoadMode(loadMode LoadMode) LoadMode { - if loadMode&NeedTypesInfo != 0 && loadMode&NeedImports == 0 { - // If NeedTypesInfo, go/packages needs to do typechecking itself so it can - // associate type info with the AST. To do so, we need the export data - // for dependencies, which means we need to ask for the direct dependencies. - // NeedImports is used to ask for the direct dependencies. - loadMode |= NeedImports - } - - if loadMode&NeedDeps != 0 && loadMode&NeedImports == 0 { - // With NeedDeps we need to load at least direct dependencies. - // NeedImports is used to ask for the direct dependencies. + if loadMode&(NeedDeps|NeedTypes|NeedTypesInfo) != 0 { + // All these things require knowing the import graph. loadMode |= NeedImports } @@ -1240,5 +1269,5 @@ func impliedLoadMode(loadMode LoadMode) LoadMode { } func usesExportData(cfg *Config) bool { - return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 + return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 } diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go index 8be18a66b..d2547c743 100644 --- a/vendor/golang.org/x/tools/imports/forward.go +++ b/vendor/golang.org/x/tools/imports/forward.go @@ -40,7 +40,7 @@ var LocalPrefix string // // Note that filename's directory influences which imports can be chosen, // so it is important that filename be accurate. -// To process data ``as if'' it were in filename, pass the data as a non-nil src. +// To process data “as if” it were in filename, pass the data as a non-nil src. func Process(filename string, src []byte, opt *Options) ([]byte, error) { var err error if src == nil { diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go index 9887f7e7a..798fe599b 100644 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go @@ -40,12 +40,12 @@ var ErrSkipFiles = errors.New("fastwalk: skip remaining files in directory") // If fastWalk returns filepath.SkipDir, the directory is skipped. // // Unlike filepath.Walk: -// * file stat calls must be done by the user. +// - file stat calls must be done by the user. // The only provided metadata is the file type, which does not include // any permission bits. -// * multiple goroutines stat the filesystem concurrently. The provided +// - multiple goroutines stat the filesystem concurrently. The provided // walkFn must be safe for concurrent use. -// * fastWalk can follow symlinks if walkFn returns the TraverseLink +// - fastWalk can follow symlinks if walkFn returns the TraverseLink // sentinel error. It is the walkFn's responsibility to prevent // fastWalk from going into symlink cycles. 
func Walk(root string, walkFn func(path string, typ os.FileMode) error) error { diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index f75336834..67256dc39 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -264,8 +264,10 @@ func cmdDebugStr(cmd *exec.Cmd) string { env := make(map[string]string) for _, kv := range cmd.Env { split := strings.SplitN(kv, "=", 2) - k, v := split[0], split[1] - env[k] = v + if len(split) == 2 { + k, v := split[0], split[1] + env[k] = v + } } var args []string diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index 925ff5356..168405322 100644 --- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -175,8 +175,8 @@ func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool { // walk walks through the given path. func (w *walker) walk(path string, typ os.FileMode) error { - dir := filepath.Dir(path) if typ.IsRegular() { + dir := filepath.Dir(path) if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) { // Doesn't make sense to have regular files // directly in your $GOPATH/src or $GOROOT/src. @@ -209,12 +209,7 @@ func (w *walker) walk(path string, typ os.FileMode) error { // Emacs noise. return nil } - fi, err := os.Lstat(path) - if err != nil { - // Just ignore it. - return nil - } - if w.shouldTraverse(dir, fi) { + if w.shouldTraverse(path) { return fastwalk.ErrTraverseLink } } @@ -224,13 +219,8 @@ func (w *walker) walk(path string, typ os.FileMode) error { // shouldTraverse reports whether the symlink fi, found in dir, // should be followed. It makes sure symlinks were never visited // before to avoid symlink loops. -func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool { - path := filepath.Join(dir, fi.Name()) - target, err := filepath.EvalSymlinks(path) - if err != nil { - return false - } - ts, err := os.Stat(target) +func (w *walker) shouldTraverse(path string) bool { + ts, err := os.Stat(path) if err != nil { fmt.Fprintln(os.Stderr, err) return false @@ -238,7 +228,7 @@ func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool { if !ts.IsDir() { return false } - if w.shouldSkipDir(ts, dir) { + if w.shouldSkipDir(ts, filepath.Dir(path)) { return false } // Check for symlink loops by statting each directory component diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index d859617b7..9e373d64e 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -796,7 +796,7 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP return getCandidatePkgs(ctx, callback, filename, filePkg, env) } -var RequiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB"} +var RequiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB", "GOWORK"} // ProcessEnv contains environment variables and settings that affect the use of // the go command, the go/build package, etc. 
@@ -906,7 +906,7 @@ func (e *ProcessEnv) GetResolver() (Resolver, error) {
 if err := e.init(); err != nil {
 return nil, err
 }
- if len(e.Env["GOMOD"]) == 0 {
+ if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 {
 e.resolver = newGopathResolver(e)
 return e.resolver, nil
 }
diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go
index 25973989e..95a88383a 100644
--- a/vendor/golang.org/x/tools/internal/imports/imports.go
+++ b/vendor/golang.org/x/tools/internal/imports/imports.go
@@ -103,12 +103,17 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e
 return formatFile(fileSet, file, src, nil, opt)
 }
 
-func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
- mergeImports(fileSet, file)
- sortImports(opt.LocalPrefix, fileSet, file)
- imps := astutil.Imports(fileSet, file)
+// formatFile formats the file syntax tree.
+// It may mutate the token.FileSet.
+//
+// If an adjust function is provided, it is called after formatting
+// with the original source (formatFile's src parameter) and the
+// formatted file, and returns the postprocessed result.
+func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
+ mergeImports(file)
+ sortImports(opt.LocalPrefix, fset.File(file.Pos()), file)
 var spacesBefore []string // import paths we need spaces before
- for _, impSection := range imps {
+ for _, impSection := range astutil.Imports(fset, file) {
 // Within each block of contiguous imports, see if any
 // import lines are in different group numbers. If so,
 // we'll need to put a space between them so it's
@@ -132,7 +137,7 @@ func formatFile(
 printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth}
 
 var buf bytes.Buffer
- err := printConfig.Fprint(&buf, fileSet, file)
+ err := printConfig.Fprint(&buf, fset, file)
 if err != nil {
 return nil, err
 }
@@ -276,11 +281,11 @@ func cutSpace(b []byte) (before, middle, after []byte) {
 }
 
 // matchSpace reformats src to use the same space context as orig.
-// 1) If orig begins with blank lines, matchSpace inserts them at the beginning of src.
-// 2) matchSpace copies the indentation of the first non-blank line in orig
-// to every non-blank line in src.
-// 3) matchSpace copies the trailing space from orig and uses it in place
-// of src's trailing space.
+// 1. If orig begins with blank lines, matchSpace inserts them at the beginning of src.
+// 2. matchSpace copies the indentation of the first non-blank line in orig
+//    to every non-blank line in src.
+// 3. matchSpace copies the trailing space from orig and uses it in place
+//    of src's trailing space.
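+//
+// For example (roughly): with orig = "\n\tfoo()\n\n" and
+// src = "bar()\nbaz()\n", the result is "\n\tbar()\n\tbaz()\n\n": the
+// leading blank line, the tab indentation, and the trailing blank
+// line all come from orig.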
func matchSpace(orig []byte, src []byte) []byte { before, _, after := cutSpace(orig) i := bytes.LastIndex(before, []byte{'\n'}) diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 2bcf41f5f..46693f243 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -70,9 +70,17 @@ func (r *ModuleResolver) init() error { Logf: r.env.Logf, WorkingDir: r.env.WorkingDir, } - vendorEnabled, mainModVendor, err := gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner) - if err != nil { - return err + + vendorEnabled := false + var mainModVendor *gocommand.ModuleJSON + + // Module vendor directories are ignored in workspace mode: + // https://go.googlesource.com/proposal/+/master/design/45713-workspace.md + if len(r.env.Env["GOWORK"]) == 0 { + vendorEnabled, mainModVendor, err = gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner) + if err != nil { + return err + } } if mainModVendor != nil && vendorEnabled { diff --git a/vendor/golang.org/x/tools/internal/imports/sortimports.go b/vendor/golang.org/x/tools/internal/imports/sortimports.go index dc52372e4..85144db1d 100644 --- a/vendor/golang.org/x/tools/internal/imports/sortimports.go +++ b/vendor/golang.org/x/tools/internal/imports/sortimports.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // Hacked up copy of go/ast/import.go +// Modified to use a single token.File in preference to a FileSet. package imports @@ -16,7 +17,9 @@ import ( // sortImports sorts runs of consecutive import lines in import blocks in f. // It also removes duplicate imports when it is possible to do so without data loss. -func sortImports(localPrefix string, fset *token.FileSet, f *ast.File) { +// +// It may mutate the token.File. +func sortImports(localPrefix string, tokFile *token.File, f *ast.File) { for i, d := range f.Decls { d, ok := d.(*ast.GenDecl) if !ok || d.Tok != token.IMPORT { @@ -39,21 +42,21 @@ func sortImports(localPrefix string, fset *token.FileSet, f *ast.File) { i := 0 specs := d.Specs[:0] for j, s := range d.Specs { - if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line { + if j > i && tokFile.Line(s.Pos()) > 1+tokFile.Line(d.Specs[j-1].End()) { // j begins a new run. End this one. - specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:j])...) + specs = append(specs, sortSpecs(localPrefix, tokFile, f, d.Specs[i:j])...) i = j } } - specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:])...) + specs = append(specs, sortSpecs(localPrefix, tokFile, f, d.Specs[i:])...) d.Specs = specs // Deduping can leave a blank line before the rparen; clean that up. if len(d.Specs) > 0 { lastSpec := d.Specs[len(d.Specs)-1] - lastLine := fset.Position(lastSpec.Pos()).Line - if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 { - fset.File(d.Rparen).MergeLine(rParenLine - 1) + lastLine := tokFile.PositionFor(lastSpec.Pos(), false).Line + if rParenLine := tokFile.PositionFor(d.Rparen, false).Line; rParenLine > lastLine+1 { + tokFile.MergeLine(rParenLine - 1) // has side effects! } } } @@ -62,7 +65,7 @@ func sortImports(localPrefix string, fset *token.FileSet, f *ast.File) { // mergeImports merges all the import declarations into the first one. // Taken from golang.org/x/tools/ast/astutil. 
// This does not adjust line numbers properly -func mergeImports(fset *token.FileSet, f *ast.File) { +func mergeImports(f *ast.File) { if len(f.Decls) <= 1 { return } @@ -144,7 +147,9 @@ type posSpan struct { End token.Pos } -func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec { +// sortSpecs sorts the import specs within each import decl. +// It may mutate the token.File. +func sortSpecs(localPrefix string, tokFile *token.File, f *ast.File, specs []ast.Spec) []ast.Spec { // Can't short-circuit here even if specs are already sorted, // since they might yet need deduplication. // A lone import, however, may be safely ignored. @@ -160,7 +165,7 @@ func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast // Identify comments in this range. // Any comment from pos[0].Start to the final line counts. - lastLine := fset.Position(pos[len(pos)-1].End).Line + lastLine := tokFile.Line(pos[len(pos)-1].End) cstart := len(f.Comments) cend := len(f.Comments) for i, g := range f.Comments { @@ -170,7 +175,7 @@ func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast if i < cstart { cstart = i } - if fset.Position(g.End()).Line > lastLine { + if tokFile.Line(g.End()) > lastLine { cend = i break } @@ -203,7 +208,7 @@ func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast deduped = append(deduped, s) } else { p := s.Pos() - fset.File(p).MergeLine(fset.Position(p).Line) + tokFile.MergeLine(tokFile.Line(p)) // has side effects! } } specs = deduped @@ -234,21 +239,21 @@ func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast // Fixup comments can insert blank lines, because import specs are on different lines. // We remove those blank lines here by merging import spec to the first import spec line. - firstSpecLine := fset.Position(specs[0].Pos()).Line + firstSpecLine := tokFile.Line(specs[0].Pos()) for _, s := range specs[1:] { p := s.Pos() - line := fset.File(p).Line(p) + line := tokFile.Line(p) for previousLine := line - 1; previousLine >= firstSpecLine; { // MergeLine can panic. Avoid the panic at the cost of not removing the blank line // golang/go#50329 - if previousLine > 0 && previousLine < fset.File(p).LineCount() { - fset.File(p).MergeLine(previousLine) + if previousLine > 0 && previousLine < tokFile.LineCount() { + tokFile.MergeLine(previousLine) // has side effects! previousLine-- } else { // try to gather some data to diagnose how this could happen req := "Please report what the imports section of your go file looked like." log.Printf("panic avoided: first:%d line:%d previous:%d max:%d. 
%s", - firstSpecLine, line, previousLine, fset.File(p).LineCount(), req) + firstSpecLine, line, previousLine, tokFile.LineCount(), req) } } } diff --git a/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/vendor/golang.org/x/tools/internal/imports/zstdlib.go index 7de2be9b4..437fbb78d 100644 --- a/vendor/golang.org/x/tools/internal/imports/zstdlib.go +++ b/vendor/golang.org/x/tools/internal/imports/zstdlib.go @@ -88,6 +88,7 @@ var stdlib = map[string][]string{ "ContainsAny", "ContainsRune", "Count", + "Cut", "Equal", "EqualFold", "ErrTooLarge", @@ -711,6 +712,11 @@ var stdlib = map[string][]string{ "ValueConverter", "Valuer", }, + "debug/buildinfo": []string{ + "BuildInfo", + "Read", + "ReadFile", + }, "debug/dwarf": []string{ "AddrType", "ArrayType", @@ -1944,6 +1950,7 @@ var stdlib = map[string][]string{ "R_PPC64_REL24_NOTOC", "R_PPC64_REL32", "R_PPC64_REL64", + "R_PPC64_RELATIVE", "R_PPC64_SECTOFF_DS", "R_PPC64_SECTOFF_LO_DS", "R_PPC64_TLS", @@ -2547,6 +2554,7 @@ var stdlib = map[string][]string{ "Symbol", }, "debug/plan9obj": []string{ + "ErrNoSymbols", "File", "FileHeader", "Magic386", @@ -2906,6 +2914,7 @@ var stdlib = map[string][]string{ "Importer", "IncDecStmt", "IndexExpr", + "IndexListExpr", "Inspect", "InterfaceType", "IsExported", @@ -3179,6 +3188,7 @@ var stdlib = map[string][]string{ "SUB", "SUB_ASSIGN", "SWITCH", + "TILDE", "TYPE", "Token", "UnaryPrec", @@ -3187,6 +3197,7 @@ var stdlib = map[string][]string{ "XOR_ASSIGN", }, "go/types": []string{ + "ArgumentError", "Array", "AssertableTo", "AssignableTo", @@ -3205,6 +3216,7 @@ var stdlib = map[string][]string{ "Complex64", "Config", "Const", + "Context", "ConvertibleTo", "DefPredeclaredTestFuncs", "Default", @@ -3224,6 +3236,8 @@ var stdlib = map[string][]string{ "ImporterFrom", "Info", "Initializer", + "Instance", + "Instantiate", "Int", "Int16", "Int32", @@ -3254,6 +3268,7 @@ var stdlib = map[string][]string{ "NewChan", "NewChecker", "NewConst", + "NewContext", "NewField", "NewFunc", "NewInterface", @@ -3268,10 +3283,14 @@ var stdlib = map[string][]string{ "NewPointer", "NewScope", "NewSignature", + "NewSignatureType", "NewSlice", "NewStruct", + "NewTerm", "NewTuple", "NewTypeName", + "NewTypeParam", + "NewUnion", "NewVar", "Nil", "Object", @@ -3296,11 +3315,15 @@ var stdlib = map[string][]string{ "StdSizes", "String", "Struct", + "Term", "Tuple", "Typ", "Type", "TypeAndValue", + "TypeList", "TypeName", + "TypeParam", + "TypeParamList", "TypeString", "Uint", "Uint16", @@ -3308,6 +3331,7 @@ var stdlib = map[string][]string{ "Uint64", "Uint8", "Uintptr", + "Union", "Universe", "Unsafe", "UnsafePointer", @@ -4080,9 +4104,11 @@ var stdlib = map[string][]string{ "SRV", "SplitHostPort", "TCPAddr", + "TCPAddrFromAddrPort", "TCPConn", "TCPListener", "UDPAddr", + "UDPAddrFromAddrPort", "UDPConn", "UnixAddr", "UnixConn", @@ -4142,6 +4168,7 @@ var stdlib = map[string][]string{ "ListenAndServe", "ListenAndServeTLS", "LocalAddrContextKey", + "MaxBytesHandler", "MaxBytesReader", "MethodConnect", "MethodDelete", @@ -4338,6 +4365,25 @@ var stdlib = map[string][]string{ "ParseDate", "ReadMessage", }, + "net/netip": []string{ + "Addr", + "AddrFrom16", + "AddrFrom4", + "AddrFromSlice", + "AddrPort", + "AddrPortFrom", + "IPv4Unspecified", + "IPv6LinkLocalAllNodes", + "IPv6Unspecified", + "MustParseAddr", + "MustParseAddrPort", + "MustParsePrefix", + "ParseAddr", + "ParseAddrPort", + "ParsePrefix", + "Prefix", + "PrefixFrom", + }, "net/rpc": []string{ "Accept", "Call", @@ -4641,6 +4687,8 @@ var stdlib = map[string][]string{ "Method", 
"New", "NewAt", + "Pointer", + "PointerTo", "Ptr", "PtrTo", "RecvDir", @@ -4819,9 +4867,11 @@ var stdlib = map[string][]string{ }, "runtime/debug": []string{ "BuildInfo", + "BuildSetting", "FreeOSMemory", "GCStats", "Module", + "ParseBuildInfo", "PrintStack", "ReadBuildInfo", "ReadGCStats", @@ -4939,11 +4989,13 @@ var stdlib = map[string][]string{ }, "strings": []string{ "Builder", + "Clone", "Compare", "Contains", "ContainsAny", "ContainsRune", "Count", + "Cut", "EqualFold", "Fields", "FieldsFunc", @@ -9793,6 +9845,7 @@ var stdlib = map[string][]string{ "Syscall18", "Syscall6", "Syscall9", + "SyscallN", "Sysctl", "SysctlUint32", "Sysctlnode", @@ -10202,7 +10255,6 @@ var stdlib = map[string][]string{ "Value", "ValueError", "ValueOf", - "Wrapper", }, "testing": []string{ "AllocsPerRun", @@ -10213,9 +10265,11 @@ var stdlib = map[string][]string{ "CoverBlock", "CoverMode", "Coverage", + "F", "Init", "InternalBenchmark", "InternalExample", + "InternalFuzzTarget", "InternalTest", "M", "Main", @@ -10313,9 +10367,11 @@ var stdlib = map[string][]string{ "ActionNode", "BoolNode", "BranchNode", + "BreakNode", "ChainNode", "CommandNode", "CommentNode", + "ContinueNode", "DotNode", "FieldNode", "IdentifierNode", @@ -10329,9 +10385,11 @@ var stdlib = map[string][]string{ "Node", "NodeAction", "NodeBool", + "NodeBreak", "NodeChain", "NodeCommand", "NodeComment", + "NodeContinue", "NodeDot", "NodeField", "NodeIdentifier", @@ -10727,6 +10785,7 @@ var stdlib = map[string][]string{ "IsSurrogate", }, "unicode/utf8": []string{ + "AppendRune", "DecodeLastRune", "DecodeLastRuneInString", "DecodeRune", diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index 9702094c5..d9950b1f0 100644 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -23,6 +23,8 @@ var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil } var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {} var TypecheckCgo int +var DepsErrors int // must be set as a LoadMode to call GetDepsErrors +var ForTest int // must be set as a LoadMode to call GetForTest var SetModFlag = func(config interface{}, value string) {} var SetModFile = func(config interface{}, value string) {} diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index ab6b30b83..25a1426d3 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -16,11 +16,10 @@ // Additionally, this package contains common utilities for working with the // new generic constructs, to supplement the standard library APIs. Notably, // the StructuralTerms API computes a minimal representation of the structural -// restrictions on a type parameter. In the future, this API may be available -// from go/types. +// restrictions on a type parameter. // -// See the example/README.md for a more detailed guide on how to update tools -// to support generics. +// An external version of these APIs is available in the +// golang.org/x/exp/typeparams module. 
package typeparams import ( @@ -121,15 +120,15 @@ func OriginMethod(fn *types.Func) *types.Func { // // For example, consider the following type declarations: // -// type Interface[T any] interface { -// Accept(T) -// } +// type Interface[T any] interface { +// Accept(T) +// } // -// type Container[T any] struct { -// Element T -// } +// type Container[T any] struct { +// Element T +// } // -// func (c Container[T]) Accept(t T) { c.Element = t } +// func (c Container[T]) Accept(t T) { c.Element = t } // // In this case, GenericAssignableTo reports that instantiations of Container // are assignable to the corresponding instantiation of Interface. diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go new file mode 100644 index 000000000..993135ec9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -0,0 +1,122 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "go/types" +) + +// CoreType returns the core type of T or nil if T does not have a core type. +// +// See https://go.dev/ref/spec#Core_types for the definition of a core type. +func CoreType(T types.Type) types.Type { + U := T.Underlying() + if _, ok := U.(*types.Interface); !ok { + return U // for non-interface types, + } + + terms, err := _NormalTerms(U) + if len(terms) == 0 || err != nil { + // len(terms) -> empty type set of interface. + // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set. + return nil // no core type. + } + + U = terms[0].Type().Underlying() + var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying()) + for identical = 1; identical < len(terms); identical++ { + if !types.Identical(U, terms[identical].Type().Underlying()) { + break + } + } + + if identical == len(terms) { + // https://go.dev/ref/spec#Core_types + // "There is a single type U which is the underlying type of all types in the type set of T" + return U + } + ch, ok := U.(*types.Chan) + if !ok { + return nil // no core type as identical < len(terms) and U is not a channel. + } + // https://go.dev/ref/spec#Core_types + // "the type chan E if T contains only bidirectional channels, or the type chan<- E or + // <-chan E depending on the direction of the directional channels present." + for chans := identical; chans < len(terms); chans++ { + curr, ok := terms[chans].Type().Underlying().(*types.Chan) + if !ok { + return nil + } + if !types.Identical(ch.Elem(), curr.Elem()) { + return nil // channel elements are not identical. + } + if ch.Dir() == types.SendRecv { + // ch is bidirectional. We can safely always use curr's direction. + ch = curr + } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() { + // ch and curr are not bidirectional and not the same direction. + return nil + } + } + return ch +} + +// _NormalTerms returns a slice of terms representing the normalized structural +// type restrictions of a type, if any. +// +// For all types other than *types.TypeParam, *types.Interface, and +// *types.Union, this is just a single term with Tilde() == false and +// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see +// below. 
+// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration type +// T[P interface{~int; m()}] int the structural restriction of the type +// parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// _NormalTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, _NormalTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the type is +// invalid, exceeds complexity bounds, or has an empty type set. In the latter +// case, _NormalTerms returns ErrEmptyTypeSet. +// +// _NormalTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func _NormalTerms(typ types.Type) ([]*Term, error) { + switch typ := typ.(type) { + case *TypeParam: + return StructuralTerms(typ) + case *Union: + return UnionTermSet(typ) + case *types.Interface: + return InterfaceTermSet(typ) + default: + return []*Term{NewTerm(false, typ)}, nil + } +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go index 090f142a5..9c631b651 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -24,20 +24,22 @@ var ErrEmptyTypeSet = errors.New("empty type set") // Structural type restrictions of a type parameter are created via // non-interface types embedded in its constraint interface (directly, or via a // chain of interface embeddings). For example, in the declaration -// type T[P interface{~int; m()}] int +// +// type T[P interface{~int; m()}] int +// // the structural restriction of the type parameter P is ~int. // // With interface embedding and unions, the specification of structural type // restrictions may be arbitrarily complex. 
For example, consider the // following: // -// type A interface{ ~string|~[]byte } +// type A interface{ ~string|~[]byte } // -// type B interface{ int|string } +// type B interface{ int|string } // -// type C interface { ~string|~int } +// type C interface { ~string|~int } // -// type T[P interface{ A|B; C }] int +// type T[P interface{ A|B; C }] int // // In this example, the structural type restriction of P is ~string|int: A|B // expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go index 10857d504..933106a23 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/termlist.go +++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go @@ -97,15 +97,6 @@ func (xl termlist) norm() termlist { return rl } -// If the type set represented by xl is specified by a single (non-𝓤) term, -// structuralType returns that type. Otherwise it returns nil. -func (xl termlist) structuralType() types.Type { - if nl := xl.norm(); len(nl) == 1 { - return nl[0].typ // if nl.isAll() then typ is nil, which is ok - } - return nil -} - // union returns the union xl ∪ yl. func (xl termlist) union(yl termlist) termlist { return append(xl, yl...).norm() diff --git a/vendor/golang.org/x/xerrors/LICENSE b/vendor/golang.org/x/xerrors/LICENSE deleted file mode 100644 index e4a47e17f..000000000 --- a/vendor/golang.org/x/xerrors/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2019 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/xerrors/PATENTS b/vendor/golang.org/x/xerrors/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/golang.org/x/xerrors/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. 
- -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/xerrors/README b/vendor/golang.org/x/xerrors/README deleted file mode 100644 index aac7867a5..000000000 --- a/vendor/golang.org/x/xerrors/README +++ /dev/null @@ -1,2 +0,0 @@ -This repository holds the transition packages for the new Go 1.13 error values. -See golang.org/design/29934-error-values. diff --git a/vendor/golang.org/x/xerrors/adaptor.go b/vendor/golang.org/x/xerrors/adaptor.go deleted file mode 100644 index 4317f2483..000000000 --- a/vendor/golang.org/x/xerrors/adaptor.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strconv" -) - -// FormatError calls the FormatError method of f with an errors.Printer -// configured according to s and verb, and writes the result to s. -func FormatError(f Formatter, s fmt.State, verb rune) { - // Assuming this function is only called from the Format method, and given - // that FormatError takes precedence over Format, it cannot be called from - // any package that supports errors.Formatter. It is therefore safe to - // disregard that State may be a specific printer implementation and use one - // of our choice instead. - - // limitations: does not support printing error as Go struct. - - var ( - sep = " " // separator before next error - p = &state{State: s} - direct = true - ) - - var err error = f - - switch verb { - // Note that this switch must match the preference order - // for ordinary string printing (%#v before %+v, and so on). - - case 'v': - if s.Flag('#') { - if stringer, ok := err.(fmt.GoStringer); ok { - io.WriteString(&p.buf, stringer.GoString()) - goto exit - } - // proceed as if it were %v - } else if s.Flag('+') { - p.printDetail = true - sep = "\n - " - } - case 's': - case 'q', 'x', 'X': - // Use an intermediate buffer in the rare cases that precision, - // truncation, or one of the alternative verbs (q, x, and X) are - // specified. 
- direct = false - - default: - p.buf.WriteString("%!") - p.buf.WriteRune(verb) - p.buf.WriteByte('(') - switch { - case err != nil: - p.buf.WriteString(reflect.TypeOf(f).String()) - default: - p.buf.WriteString("<nil>") - } - p.buf.WriteByte(')') - io.Copy(s, &p.buf) - return - } - -loop: - for { - switch v := err.(type) { - case Formatter: - err = v.FormatError((*printer)(p)) - case fmt.Formatter: - v.Format(p, 'v') - break loop - default: - io.WriteString(&p.buf, v.Error()) - break loop - } - if err == nil { - break - } - if p.needColon || !p.printDetail { - p.buf.WriteByte(':') - p.needColon = false - } - p.buf.WriteString(sep) - p.inDetail = false - p.needNewline = false - } - -exit: - width, okW := s.Width() - prec, okP := s.Precision() - - if !direct || (okW && width > 0) || okP { - // Construct format string from State s. - format := []byte{'%'} - if s.Flag('-') { - format = append(format, '-') - } - if s.Flag('+') { - format = append(format, '+') - } - if s.Flag(' ') { - format = append(format, ' ') - } - if okW { - format = strconv.AppendInt(format, int64(width), 10) - } - if okP { - format = append(format, '.') - format = strconv.AppendInt(format, int64(prec), 10) - } - format = append(format, string(verb)...) - fmt.Fprintf(s, string(format), p.buf.String()) - } else { - io.Copy(s, &p.buf) - } -} - -var detailSep = []byte("\n ") - -// state tracks error printing state. It implements fmt.State. -type state struct { - fmt.State - buf bytes.Buffer - - printDetail bool - inDetail bool - needColon bool - needNewline bool -} - -func (s *state) Write(b []byte) (n int, err error) { - if s.printDetail { - if len(b) == 0 { - return 0, nil - } - if s.inDetail && s.needColon { - s.needNewline = true - if b[0] == '\n' { - b = b[1:] - } - } - k := 0 - for i, c := range b { - if s.needNewline { - if s.inDetail && s.needColon { - s.buf.WriteByte(':') - s.needColon = false - } - s.buf.Write(detailSep) - s.needNewline = false - } - if c == '\n' { - s.buf.Write(b[k:i]) - k = i + 1 - s.needNewline = true - } - } - s.buf.Write(b[k:]) - if !s.inDetail { - s.needColon = true - } - } else if !s.inDetail { - s.buf.Write(b) - } - return len(b), nil -} - -// printer wraps a state to implement an xerrors.Printer. -type printer state - -func (s *printer) Print(args ...interface{}) { - if !s.inDetail || s.printDetail { - fmt.Fprint((*state)(s), args...) - } -} - -func (s *printer) Printf(format string, args ...interface{}) { - if !s.inDetail || s.printDetail { - fmt.Fprintf((*state)(s), format, args...) - } -} - -func (s *printer) Detail() bool { - s.inDetail = true - return s.printDetail -} diff --git a/vendor/golang.org/x/xerrors/codereview.cfg b/vendor/golang.org/x/xerrors/codereview.cfg deleted file mode 100644 index 3f8b14b64..000000000 --- a/vendor/golang.org/x/xerrors/codereview.cfg +++ /dev/null @@ -1 +0,0 @@ -issuerepo: golang/go diff --git a/vendor/golang.org/x/xerrors/doc.go b/vendor/golang.org/x/xerrors/doc.go deleted file mode 100644 index eef99d9d5..000000000 --- a/vendor/golang.org/x/xerrors/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package xerrors implements functions to manipulate errors. 
-// -// This package is based on the Go 2 proposal for error values: -// https://golang.org/design/29934-error-values -// -// These functions were incorporated into the standard library's errors package -// in Go 1.13: -// - Is -// - As -// - Unwrap -// -// Also, Errorf's %w verb was incorporated into fmt.Errorf. -// -// Use this package to get equivalent behavior in all supported Go versions. -// -// No other features of this package were included in Go 1.13, and at present -// there are no plans to include any of them. -package xerrors // import "golang.org/x/xerrors" diff --git a/vendor/golang.org/x/xerrors/errors.go b/vendor/golang.org/x/xerrors/errors.go deleted file mode 100644 index e88d3772d..000000000 --- a/vendor/golang.org/x/xerrors/errors.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import "fmt" - -// errorString is a trivial implementation of error. -type errorString struct { - s string - frame Frame -} - -// New returns an error that formats as the given text. -// -// The returned error contains a Frame set to the caller's location and -// implements Formatter to show this information when printed with details. -func New(text string) error { - return &errorString{text, Caller(1)} -} - -func (e *errorString) Error() string { - return e.s -} - -func (e *errorString) Format(s fmt.State, v rune) { FormatError(e, s, v) } - -func (e *errorString) FormatError(p Printer) (next error) { - p.Print(e.s) - e.frame.Format(p) - return nil -} diff --git a/vendor/golang.org/x/xerrors/fmt.go b/vendor/golang.org/x/xerrors/fmt.go deleted file mode 100644 index 829862ddf..000000000 --- a/vendor/golang.org/x/xerrors/fmt.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import ( - "fmt" - "strings" - "unicode" - "unicode/utf8" - - "golang.org/x/xerrors/internal" -) - -const percentBangString = "%!" - -// Errorf formats according to a format specifier and returns the string as a -// value that satisfies error. -// -// The returned error includes the file and line number of the caller when -// formatted with additional detail enabled. If the last argument is an error -// the returned error's Format method will return it if the format string ends -// with ": %s", ": %v", or ": %w". If the last argument is an error and the -// format string ends with ": %w", the returned error implements an Unwrap -// method returning it. -// -// If the format specifier includes a %w verb with an error operand in a -// position other than at the end, the returned error will still implement an -// Unwrap method returning the operand, but the error's Format method will not -// return the wrapped error. -// -// It is invalid to include more than one %w verb or to supply it with an -// operand that does not implement the error interface. The %w verb is otherwise -// a synonym for %v. -func Errorf(format string, a ...interface{}) error { - format = formatPlusW(format) - // Support a ": %[wsv]" suffix, which works well with xerrors.Formatter. 
- wrap := strings.HasSuffix(format, ": %w") - idx, format2, ok := parsePercentW(format) - percentWElsewhere := !wrap && idx >= 0 - if !percentWElsewhere && (wrap || strings.HasSuffix(format, ": %s") || strings.HasSuffix(format, ": %v")) { - err := errorAt(a, len(a)-1) - if err == nil { - return &noWrapError{fmt.Sprintf(format, a...), nil, Caller(1)} - } - // TODO: this is not entirely correct. The error value could be - // printed elsewhere in format if it mixes numbered with unnumbered - // substitutions. With relatively small changes to doPrintf we can - // have it optionally ignore extra arguments and pass the argument - // list in its entirety. - msg := fmt.Sprintf(format[:len(format)-len(": %s")], a[:len(a)-1]...) - frame := Frame{} - if internal.EnableTrace { - frame = Caller(1) - } - if wrap { - return &wrapError{msg, err, frame} - } - return &noWrapError{msg, err, frame} - } - // Support %w anywhere. - // TODO: don't repeat the wrapped error's message when %w occurs in the middle. - msg := fmt.Sprintf(format2, a...) - if idx < 0 { - return &noWrapError{msg, nil, Caller(1)} - } - err := errorAt(a, idx) - if !ok || err == nil { - // Too many %ws or argument of %w is not an error. Approximate the Go - // 1.13 fmt.Errorf message. - return &noWrapError{fmt.Sprintf("%sw(%s)", percentBangString, msg), nil, Caller(1)} - } - frame := Frame{} - if internal.EnableTrace { - frame = Caller(1) - } - return &wrapError{msg, err, frame} -} - -func errorAt(args []interface{}, i int) error { - if i < 0 || i >= len(args) { - return nil - } - err, ok := args[i].(error) - if !ok { - return nil - } - return err -} - -// formatPlusW is used to avoid the vet check that will barf at %w. -func formatPlusW(s string) string { - return s -} - -// Return the index of the only %w in format, or -1 if none. -// Also return a rewritten format string with %w replaced by %v, and -// false if there is more than one %w. -// TODO: handle "%[N]w". -func parsePercentW(format string) (idx int, newFormat string, ok bool) { - // Loosely copied from golang.org/x/tools/go/analysis/passes/printf/printf.go. - idx = -1 - ok = true - n := 0 - sz := 0 - var isW bool - for i := 0; i < len(format); i += sz { - if format[i] != '%' { - sz = 1 - continue - } - // "%%" is not a format directive. - if i+1 < len(format) && format[i+1] == '%' { - sz = 2 - continue - } - sz, isW = parsePrintfVerb(format[i:]) - if isW { - if idx >= 0 { - ok = false - } else { - idx = n - } - // "Replace" the last character, the 'w', with a 'v'. - p := i + sz - 1 - format = format[:p] + "v" + format[p+1:] - } - n++ - } - return idx, format, ok -} - -// Parse the printf verb starting with a % at s[0]. -// Return how many bytes it occupies and whether the verb is 'w'. -func parsePrintfVerb(s string) (int, bool) { - // Assume only that the directive is a sequence of non-letters followed by a single letter. 
- sz := 0 - var r rune - for i := 1; i < len(s); i += sz { - r, sz = utf8.DecodeRuneInString(s[i:]) - if unicode.IsLetter(r) { - return i + sz, r == 'w' - } - } - return len(s), false -} - -type noWrapError struct { - msg string - err error - frame Frame -} - -func (e *noWrapError) Error() string { - return fmt.Sprint(e) -} - -func (e *noWrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } - -func (e *noWrapError) FormatError(p Printer) (next error) { - p.Print(e.msg) - e.frame.Format(p) - return e.err -} - -type wrapError struct { - msg string - err error - frame Frame -} - -func (e *wrapError) Error() string { - return fmt.Sprint(e) -} - -func (e *wrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } - -func (e *wrapError) FormatError(p Printer) (next error) { - p.Print(e.msg) - e.frame.Format(p) - return e.err -} - -func (e *wrapError) Unwrap() error { - return e.err -} diff --git a/vendor/golang.org/x/xerrors/format.go b/vendor/golang.org/x/xerrors/format.go deleted file mode 100644 index 1bc9c26b9..000000000 --- a/vendor/golang.org/x/xerrors/format.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -// A Formatter formats error messages. -type Formatter interface { - error - - // FormatError prints the receiver's first error and returns the next error in - // the error chain, if any. - FormatError(p Printer) (next error) -} - -// A Printer formats error messages. -// -// The most common implementation of Printer is the one provided by package fmt -// during Printf (as of Go 1.13). Localization packages such as golang.org/x/text/message -// typically provide their own implementations. -type Printer interface { - // Print appends args to the message output. - Print(args ...interface{}) - - // Printf writes a formatted string. - Printf(format string, args ...interface{}) - - // Detail reports whether error detail is requested. - // After the first call to Detail, all text written to the Printer - // is formatted as additional detail, or ignored when - // detail has not been requested. - // If Detail returns false, the caller can avoid printing the detail at all. - Detail() bool -} diff --git a/vendor/golang.org/x/xerrors/frame.go b/vendor/golang.org/x/xerrors/frame.go deleted file mode 100644 index 0de628ec5..000000000 --- a/vendor/golang.org/x/xerrors/frame.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import ( - "runtime" -) - -// A Frame contains part of a call stack. -type Frame struct { - // Make room for three PCs: the one we were asked for, what it called, - // and possibly a PC for skipPleaseUseCallersFrames. See: - // https://go.googlesource.com/go/+/032678e0fb/src/runtime/extern.go#169 - frames [3]uintptr -} - -// Caller returns a Frame that describes a frame on the caller's stack. -// The argument skip is the number of frames to skip over. -// Caller(0) returns the frame for the caller of Caller. -func Caller(skip int) Frame { - var s Frame - runtime.Callers(skip+1, s.frames[:]) - return s -} - -// location reports the file, line, and function of a frame. -// -// The returned function may be "" even if file and line are not. 
-func (f Frame) location() (function, file string, line int) { - frames := runtime.CallersFrames(f.frames[:]) - if _, ok := frames.Next(); !ok { - return "", "", 0 - } - fr, ok := frames.Next() - if !ok { - return "", "", 0 - } - return fr.Function, fr.File, fr.Line -} - -// Format prints the stack as error detail. -// It should be called from an error's Format implementation -// after printing any other error detail. -func (f Frame) Format(p Printer) { - if p.Detail() { - function, file, line := f.location() - if function != "" { - p.Printf("%s\n ", function) - } - if file != "" { - p.Printf("%s:%d\n", file, line) - } - } -} diff --git a/vendor/golang.org/x/xerrors/internal/internal.go b/vendor/golang.org/x/xerrors/internal/internal.go deleted file mode 100644 index 89f4eca5d..000000000 --- a/vendor/golang.org/x/xerrors/internal/internal.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -// EnableTrace indicates whether stack information should be recorded in errors. -var EnableTrace = true diff --git a/vendor/golang.org/x/xerrors/wrap.go b/vendor/golang.org/x/xerrors/wrap.go deleted file mode 100644 index 9a3b51037..000000000 --- a/vendor/golang.org/x/xerrors/wrap.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import ( - "reflect" -) - -// A Wrapper provides context around another error. -type Wrapper interface { - // Unwrap returns the next error in the error chain. - // If there is no next error, Unwrap returns nil. - Unwrap() error -} - -// Opaque returns an error with the same error formatting as err -// but that does not match err and cannot be unwrapped. -func Opaque(err error) error { - return noWrapper{err} -} - -type noWrapper struct { - error -} - -func (e noWrapper) FormatError(p Printer) (next error) { - if f, ok := e.error.(Formatter); ok { - return f.FormatError(p) - } - p.Print(e.error) - return nil -} - -// Unwrap returns the result of calling the Unwrap method on err, if err implements -// Unwrap. Otherwise, Unwrap returns nil. -func Unwrap(err error) error { - u, ok := err.(Wrapper) - if !ok { - return nil - } - return u.Unwrap() -} - -// Is reports whether any error in err's chain matches target. -// -// An error is considered to match a target if it is equal to that target or if -// it implements a method Is(error) bool such that Is(target) returns true. -func Is(err, target error) bool { - if target == nil { - return err == target - } - - isComparable := reflect.TypeOf(target).Comparable() - for { - if isComparable && err == target { - return true - } - if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { - return true - } - // TODO: consider supporing target.Is(err). This would allow - // user-definable predicates, but also may allow for coping with sloppy - // APIs, thereby making it easier to get away with them. - if err = Unwrap(err); err == nil { - return false - } - } -} - -// As finds the first error in err's chain that matches the type to which target -// points, and if so, sets the target to its value and returns true. An error -// matches a type if it is assignable to the target type, or if it has a method -// As(interface{}) bool such that As(target) returns true. 
As will panic if target -// is not a non-nil pointer to a type which implements error or is of interface type. -// -// The As method should set the target to its value and return true if err -// matches the type to which target points. -func As(err error, target interface{}) bool { - if target == nil { - panic("errors: target cannot be nil") - } - val := reflect.ValueOf(target) - typ := val.Type() - if typ.Kind() != reflect.Ptr || val.IsNil() { - panic("errors: target must be a non-nil pointer") - } - if e := typ.Elem(); e.Kind() != reflect.Interface && !e.Implements(errorType) { - panic("errors: *target must be interface or implement error") - } - targetType := typ.Elem() - for err != nil { - if reflect.TypeOf(err).AssignableTo(targetType) { - val.Elem().Set(reflect.ValueOf(err)) - return true - } - if x, ok := err.(interface{ As(interface{}) bool }); ok && x.As(target) { - return true - } - err = Unwrap(err) - } - return false -} - -var errorType = reflect.TypeOf((*error)(nil)).Elem() diff --git a/vendor/gopkg.in/guregu/null.v4/.gitignore b/vendor/gopkg.in/guregu/null.v4/.gitignore new file mode 100644 index 000000000..e9eb644a6 --- /dev/null +++ b/vendor/gopkg.in/guregu/null.v4/.gitignore @@ -0,0 +1,2 @@ +coverage.out +.idea/ diff --git a/vendor/gopkg.in/guregu/null.v4/LICENSE b/vendor/gopkg.in/guregu/null.v4/LICENSE new file mode 100644 index 000000000..69062b45b --- /dev/null +++ b/vendor/gopkg.in/guregu/null.v4/LICENSE @@ -0,0 +1,10 @@ +Copyright (c) 2014, Greg Roseberry +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/gopkg.in/guregu/null.v4/README.md b/vendor/gopkg.in/guregu/null.v4/README.md new file mode 100644 index 000000000..be0892c25 --- /dev/null +++ b/vendor/gopkg.in/guregu/null.v4/README.md @@ -0,0 +1,75 @@ +## null [![GoDoc](https://godoc.org/github.com/guregu/null?status.svg)](https://godoc.org/github.com/guregu/null) [![CircleCI](https://circleci.com/gh/guregu/null.svg?style=svg)](https://circleci.com/gh/guregu/null) +`import "gopkg.in/guregu/null.v4"` + +null is a library with reasonable options for dealing with nullable SQL and JSON values + +There are two packages: `null` and its subpackage `zero`. + +Types in `null` will only be considered null on null input, and will JSON encode to `null`. 
If you need zero and null to be considered separate values, use these. + +Types in `zero` are treated like zero values in Go: blank string input will produce a null `zero.String`, and null Strings will JSON encode to `""`. Zero values of these types will be considered null to SQL. If you need zero and null treated the same, use these. + +All types implement `sql.Scanner` and `driver.Valuer`, so you can use this library in place of `sql.NullXXX`. All types also implement: `encoding.TextMarshaler`, `encoding.TextUnmarshaler`, `json.Marshaler`, and `json.Unmarshaler`. + +### null package + +`import "gopkg.in/guregu/null.v4"` + +#### null.String +Nullable string. + +Marshals to JSON null if SQL source data is null. Zero (blank) input will not produce a null String. Can unmarshal from `sql.NullString` JSON input or string input. + +#### null.Int +Nullable int64. + +Marshals to JSON null if SQL source data is null. Zero input will not produce a null Int. Can unmarshal from `sql.NullInt64` JSON input. + +#### null.Float +Nullable float64. + +Marshals to JSON null if SQL source data is null. Zero input will not produce a null Float. Can unmarshal from `sql.NullFloat64` JSON input. + +#### null.Bool +Nullable bool. + +Marshals to JSON null if SQL source data is null. False input will not produce a null Bool. Can unmarshal from `sql.NullBool` JSON input. + +#### null.Time + +Marshals to JSON null if SQL source data is null. Uses `time.Time`'s marshaler. Can unmarshal from `pq.NullTime` and similar JSON input. + +### zero package + +`import "gopkg.in/guregu/null.v4/zero"` + +#### zero.String +Nullable string. + +Will marshal to a blank string if null. Blank string input produces a null String. Null values and zero values are considered equivalent. Can unmarshal from `sql.NullString` JSON input. + +#### zero.Int +Nullable int64. + +Will marshal to 0 if null. 0 produces a null Int. Null values and zero values are considered equivalent. Can unmarshal from `sql.NullInt64` JSON input. + +#### zero.Float +Nullable float64. + +Will marshal to 0 if null. 0.0 produces a null Float. Null values and zero values are considered equivalent. Can unmarshal from `sql.NullFloat64` JSON input. + +#### zero.Bool +Nullable bool. + +Will marshal to false if null. `false` produces a null Bool. Null values and zero values are considered equivalent. Can unmarshal from `sql.NullBool` JSON input. + +#### zero.Time + +Will marshal to the zero time if null. Uses `time.Time`'s marshaler. Can unmarshal from `pq.NullTime` and similar JSON input. + + +### Bugs +`json`'s `",omitempty"` struct tag does not work correctly right now. It will never omit a null or empty String. This might be [fixed eventually](https://github.com/golang/go/issues/11939). + +### License +BSD diff --git a/vendor/gopkg.in/guregu/null.v4/bool.go b/vendor/gopkg.in/guregu/null.v4/bool.go new file mode 100644 index 000000000..a27c74935 --- /dev/null +++ b/vendor/gopkg.in/guregu/null.v4/bool.go @@ -0,0 +1,130 @@ +package null + +import ( + "bytes" + "database/sql" + "encoding/json" + "errors" + "fmt" +) + +// Bool is a nullable bool. +// It does not consider false values to be null. +// It will decode to null, not false, if null. +type Bool struct { + sql.NullBool +} + +// NewBool creates a new Bool +func NewBool(b bool, valid bool) Bool { + return Bool{ + NullBool: sql.NullBool{ + Bool: b, + Valid: valid, + }, + } +} + +// BoolFrom creates a new Bool that will always be valid.
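To make the null-versus-zero distinction described in the README concrete, here is a minimal sketch using the Bool types vendored here (behavior as documented in bool.go and zero/bool.go in this diff):

package main

import (
	"encoding/json"
	"fmt"

	null "gopkg.in/guregu/null.v4"
	"gopkg.in/guregu/null.v4/zero"
)

func main() {
	// null package: a null Bool marshals to JSON null.
	nb := null.NewBool(false, false)
	out, _ := json.Marshal(nb)
	fmt.Println(string(out)) // null

	// zero package: a null Bool marshals to its zero value instead.
	zb := zero.NewBool(false, false)
	out, _ = json.Marshal(zb)
	fmt.Println(string(out)) // false

	// ...and false input is treated as null on unmarshal.
	_ = json.Unmarshal([]byte("false"), &zb)
	fmt.Println(zb.Valid) // false
}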
+func BoolFrom(b bool) Bool { + return NewBool(b, true) +} + +// BoolFromPtr creates a new Bool that will be null if b is nil. +func BoolFromPtr(b *bool) Bool { + if b == nil { + return NewBool(false, false) + } + return NewBool(*b, true) +} + +// ValueOrZero returns the inner value if valid, otherwise false. +func (b Bool) ValueOrZero() bool { + return b.Valid && b.Bool +} + +// UnmarshalJSON implements json.Unmarshaler. +// It supports true, false, and null input. +// false will not be considered a null Bool. +func (b *Bool) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, nullBytes) { + b.Valid = false + return nil + } + + if err := json.Unmarshal(data, &b.Bool); err != nil { + return fmt.Errorf("null: couldn't unmarshal JSON: %w", err) + } + + b.Valid = true + return nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +// It will unmarshal to a null Bool if the input is blank. +// It will return an error if the input is not a boolean, blank, or "null". +func (b *Bool) UnmarshalText(text []byte) error { + str := string(text) + switch str { + case "", "null": + b.Valid = false + return nil + case "true": + b.Bool = true + case "false": + b.Bool = false + default: + return errors.New("null: invalid input for UnmarshalText:" + str) + } + b.Valid = true + return nil +} + +// MarshalJSON implements json.Marshaler. +// It will encode null if this Bool is null. +func (b Bool) MarshalJSON() ([]byte, error) { + if !b.Valid { + return []byte("null"), nil + } + if !b.Bool { + return []byte("false"), nil + } + return []byte("true"), nil +} + +// MarshalText implements encoding.TextMarshaler. +// It will encode a blank string if this Bool is null. +func (b Bool) MarshalText() ([]byte, error) { + if !b.Valid { + return []byte{}, nil + } + if !b.Bool { + return []byte("false"), nil + } + return []byte("true"), nil +} + +// SetValid changes this Bool's value and also sets it to be non-null. +func (b *Bool) SetValid(v bool) { + b.Bool = v + b.Valid = true +} + +// Ptr returns a pointer to this Bool's value, or a nil pointer if this Bool is null. +func (b Bool) Ptr() *bool { + if !b.Valid { + return nil + } + return &b.Bool +} + +// IsZero returns true for invalid Bools, for future omitempty support (Go 1.4?) +// A non-null Bool with a false value will not be considered zero. +func (b Bool) IsZero() bool { + return !b.Valid +} + +// Equal returns true if both booleans have the same value or are both null. +func (b Bool) Equal(other Bool) bool { + return b.Valid == other.Valid && (!b.Valid || b.Bool == other.Bool) +} diff --git a/vendor/gopkg.in/guregu/null.v4/float.go b/vendor/gopkg.in/guregu/null.v4/float.go new file mode 100644 index 000000000..6ec31eb83 --- /dev/null +++ b/vendor/gopkg.in/guregu/null.v4/float.go @@ -0,0 +1,156 @@ +package null + +import ( + "bytes" + "database/sql" + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "strconv" +) + +// Float is a nullable float64. +// It does not consider zero values to be null. +// It will decode to null, not zero, if null. +type Float struct { + sql.NullFloat64 +} + +// NewFloat creates a new Float +func NewFloat(f float64, valid bool) Float { + return Float{ + NullFloat64: sql.NullFloat64{ + Float64: f, + Valid: valid, + }, + } +} + +// FloatFrom creates a new Float that will always be valid. +func FloatFrom(f float64) Float { + return NewFloat(f, true) +} + +// FloatFromPtr creates a new Float that will be null if f is nil.
+func FloatFromPtr(f *float64) Float { + if f == nil { + return NewFloat(0, false) + } + return NewFloat(*f, true) +} + +// ValueOrZero returns the inner value if valid, otherwise zero. +func (f Float) ValueOrZero() float64 { + if !f.Valid { + return 0 + } + return f.Float64 +} + +// UnmarshalJSON implements json.Unmarshaler. +// It supports number, string, and null input. +// 0 will not be considered a null Float. +func (f *Float) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, nullBytes) { + f.Valid = false + return nil + } + + if err := json.Unmarshal(data, &f.Float64); err != nil { + var typeError *json.UnmarshalTypeError + if errors.As(err, &typeError) { + // special case: accept string input + if typeError.Value != "string" { + return fmt.Errorf("null: JSON input is invalid type (need float or string): %w", err) + } + var str string + if err := json.Unmarshal(data, &str); err != nil { + return fmt.Errorf("null: couldn't unmarshal number string: %w", err) + } + n, err := strconv.ParseFloat(str, 64) + if err != nil { + return fmt.Errorf("null: couldn't convert string to float: %w", err) + } + f.Float64 = n + f.Valid = true + return nil + } + return fmt.Errorf("null: couldn't unmarshal JSON: %w", err) + } + + f.Valid = true + return nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +// It will unmarshal to a null Float if the input is blank. +// It will return an error if the input is not a float, blank, or "null". +func (f *Float) UnmarshalText(text []byte) error { + str := string(text) + if str == "" || str == "null" { + f.Valid = false + return nil + } + var err error + f.Float64, err = strconv.ParseFloat(string(text), 64) + if err != nil { + return fmt.Errorf("null: couldn't unmarshal text: %w", err) + } + f.Valid = true + return err +} + +// MarshalJSON implements json.Marshaler. +// It will encode null if this Float is null. +func (f Float) MarshalJSON() ([]byte, error) { + if !f.Valid { + return []byte("null"), nil + } + if math.IsInf(f.Float64, 0) || math.IsNaN(f.Float64) { + return nil, &json.UnsupportedValueError{ + Value: reflect.ValueOf(f.Float64), + Str: strconv.FormatFloat(f.Float64, 'g', -1, 64), + } + } + return []byte(strconv.FormatFloat(f.Float64, 'f', -1, 64)), nil +} + +// MarshalText implements encoding.TextMarshaler. +// It will encode a blank string if this Float is null. +func (f Float) MarshalText() ([]byte, error) { + if !f.Valid { + return []byte{}, nil + } + return []byte(strconv.FormatFloat(f.Float64, 'f', -1, 64)), nil +} + +// SetValid changes this Float's value and also sets it to be non-null. +func (f *Float) SetValid(n float64) { + f.Float64 = n + f.Valid = true +} + +// Ptr returns a pointer to this Float's value, or a nil pointer if this Float is null. +func (f Float) Ptr() *float64 { + if !f.Valid { + return nil + } + return &f.Float64 +} + +// IsZero returns true for invalid Floats, for future omitempty support (Go 1.4?) +// A non-null Float with a 0 value will not be considered zero. +func (f Float) IsZero() bool { + return !f.Valid +} + +// Equal returns true if both floats have the same value or are both null. +// Warning: calculations using floating point numbers can result in different +// in-memory representations of the same number. Therefore, this function is not +// suitable to compare the result of a calculation. Use this method only to check +// if the value has changed in comparison to some previous value.
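One behavior of the UnmarshalJSON implementation above that is easy to miss: numeric strings are accepted alongside plain numbers. A minimal sketch:

package main

import (
	"encoding/json"
	"fmt"

	null "gopkg.in/guregu/null.v4"
)

func main() {
	var f null.Float

	// A plain JSON number unmarshals as expected.
	_ = json.Unmarshal([]byte(`1.5`), &f)
	fmt.Println(f.Float64, f.Valid) // 1.5 true

	// A numeric string is accepted via the string special case.
	_ = json.Unmarshal([]byte(`"2.5"`), &f)
	fmt.Println(f.Float64, f.Valid) // 2.5 true

	// JSON null clears the value.
	_ = json.Unmarshal([]byte(`null`), &f)
	fmt.Println(f.Valid) // false
}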
+func (f Float) Equal(other Float) bool { + return f.Valid == other.Valid && (!f.Valid || f.Float64 == other.Float64) +} diff --git a/vendor/gopkg.in/guregu/null.v4/int.go b/vendor/gopkg.in/guregu/null.v4/int.go new file mode 100644 index 000000000..adc36ae56 --- /dev/null +++ b/vendor/gopkg.in/guregu/null.v4/int.go @@ -0,0 +1,144 @@ +package null + +import ( + "bytes" + "database/sql" + "encoding/json" + "errors" + "fmt" + "strconv" +) + +// Int is a nullable int64. +// It does not consider zero values to be null. +// It will decode to null, not zero, if null. +type Int struct { + sql.NullInt64 +} + +// NewInt creates a new Int +func NewInt(i int64, valid bool) Int { + return Int{ + NullInt64: sql.NullInt64{ + Int64: i, + Valid: valid, + }, + } +} + +// IntFrom creates a new Int that will always be valid. +func IntFrom(i int64) Int { + return NewInt(i, true) +} + +// IntFromPtr creates a new Int that will be null if i is nil. +func IntFromPtr(i *int64) Int { + if i == nil { + return NewInt(0, false) + } + return NewInt(*i, true) +} + +// ValueOrZero returns the inner value if valid, otherwise zero. +func (i Int) ValueOrZero() int64 { + if !i.Valid { + return 0 + } + return i.Int64 +} + +// UnmarshalJSON implements json.Unmarshaler. +// It supports number, string, and null input. +// 0 will not be considered a null Int. +func (i *Int) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, nullBytes) { + i.Valid = false + return nil + } + + if err := json.Unmarshal(data, &i.Int64); err != nil { + var typeError *json.UnmarshalTypeError + if errors.As(err, &typeError) { + // special case: accept string input + if typeError.Value != "string" { + return fmt.Errorf("null: JSON input is invalid type (need int or string): %w", err) + } + var str string + if err := json.Unmarshal(data, &str); err != nil { + return fmt.Errorf("null: couldn't unmarshal number string: %w", err) + } + n, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return fmt.Errorf("null: couldn't convert string to int: %w", err) + } + i.Int64 = n + i.Valid = true + return nil + } + return fmt.Errorf("null: couldn't unmarshal JSON: %w", err) + } + + i.Valid = true + return nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +// It will unmarshal to a null Int if the input is blank. +// It will return an error if the input is not an integer, blank, or "null". +func (i *Int) UnmarshalText(text []byte) error { + str := string(text) + if str == "" || str == "null" { + i.Valid = false + return nil + } + var err error + i.Int64, err = strconv.ParseInt(string(text), 10, 64) + if err != nil { + return fmt.Errorf("null: couldn't unmarshal text: %w", err) + } + i.Valid = true + return nil +} + +// MarshalJSON implements json.Marshaler. +// It will encode null if this Int is null. +func (i Int) MarshalJSON() ([]byte, error) { + if !i.Valid { + return []byte("null"), nil + } + return []byte(strconv.FormatInt(i.Int64, 10)), nil +} + +// MarshalText implements encoding.TextMarshaler. +// It will encode a blank string if this Int is null. +func (i Int) MarshalText() ([]byte, error) { + if !i.Valid { + return []byte{}, nil + } + return []byte(strconv.FormatInt(i.Int64, 10)), nil +} + +// SetValid changes this Int's value and also sets it to be non-null. +func (i *Int) SetValid(n int64) { + i.Int64 = n + i.Valid = true +} + +// Ptr returns a pointer to this Int's value, or a nil pointer if this Int is null.
+func (i Int) Ptr() *int64 { + if !i.Valid { + return nil + } + return &i.Int64 +} + +// IsZero returns true for invalid Ints, for future omitempty support (Go 1.4?) +// A non-null Int with a 0 value will not be considered zero. +func (i Int) IsZero() bool { + return !i.Valid +} + +// Equal returns true if both ints have the same value or are both null. +func (i Int) Equal(other Int) bool { + return i.Valid == other.Valid && (!i.Valid || i.Int64 == other.Int64) +} diff --git a/vendor/gopkg.in/guregu/null.v4/string.go b/vendor/gopkg.in/guregu/null.v4/string.go new file mode 100644 index 000000000..67f6aaf09 --- /dev/null +++ b/vendor/gopkg.in/guregu/null.v4/string.go @@ -0,0 +1,118 @@ +// Package null contains SQL types that consider zero input and null input as separate values, +// with convenient support for JSON and text marshaling. +// Types in this package will always encode to their null value if null. +// Use the zero subpackage if you want zero values and null to be treated the same. +package null + +import ( + "bytes" + "database/sql" + "encoding/json" + "fmt" +) + +// nullBytes is a JSON null literal +var nullBytes = []byte("null") + +// String is a nullable string. It supports SQL and JSON serialization. +// It will marshal to null if null. Blank string input is not considered null, +// except by UnmarshalText. +type String struct { + sql.NullString +} + +// StringFrom creates a new String that will always be valid. +func StringFrom(s string) String { + return NewString(s, true) +} + +// StringFromPtr creates a new String that will be null if s is nil. +func StringFromPtr(s *string) String { + if s == nil { + return NewString("", false) + } + return NewString(*s, true) +} + +// ValueOrZero returns the inner value if valid, otherwise zero. +func (s String) ValueOrZero() string { + if !s.Valid { + return "" + } + return s.String +} + +// NewString creates a new String +func NewString(s string, valid bool) String { + return String{ + NullString: sql.NullString{ + String: s, + Valid: valid, + }, + } +} + +// UnmarshalJSON implements json.Unmarshaler. +// It supports string and null input. Blank string input does not produce a null String. +func (s *String) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, nullBytes) { + s.Valid = false + return nil + } + + if err := json.Unmarshal(data, &s.String); err != nil { + return fmt.Errorf("null: couldn't unmarshal JSON: %w", err) + } + + s.Valid = true + return nil +} + +// MarshalJSON implements json.Marshaler. +// It will encode null if this String is null. +func (s String) MarshalJSON() ([]byte, error) { + if !s.Valid { + return []byte("null"), nil + } + return json.Marshal(s.String) +} + +// MarshalText implements encoding.TextMarshaler. +// It will encode a blank string when this String is null. +func (s String) MarshalText() ([]byte, error) { + if !s.Valid { + return []byte{}, nil + } + return []byte(s.String), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +// It will unmarshal to a null String if the input is a blank string. +func (s *String) UnmarshalText(text []byte) error { + s.String = string(text) + s.Valid = s.String != "" + return nil +} + +// SetValid changes this String's value and also sets it to be non-null. +func (s *String) SetValid(v string) { + s.String = v + s.Valid = true +} + +// Ptr returns a pointer to this String's value, or a nil pointer if this String is null.
+func (s String) Ptr() *string { + if !s.Valid { + return nil + } + return &s.String +} + +// IsZero returns true for null strings, for potential future omitempty support. +func (s String) IsZero() bool { + return !s.Valid +} + +// Equal returns true if both strings have the same value or are both null. +func (s String) Equal(other String) bool { + return s.Valid == other.Valid && (!s.Valid || s.String == other.String) +} diff --git a/vendor/gopkg.in/guregu/null.v4/time.go b/vendor/gopkg.in/guregu/null.v4/time.go new file mode 100644 index 000000000..15c16cf65 --- /dev/null +++ b/vendor/gopkg.in/guregu/null.v4/time.go @@ -0,0 +1,140 @@ +package null + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "encoding/json" + "fmt" + "time" +) + +// Time is a nullable time.Time. It supports SQL and JSON serialization. +// It will marshal to null if null. +type Time struct { + sql.NullTime +} + +// Value implements the driver Valuer interface. +func (t Time) Value() (driver.Value, error) { + if !t.Valid { + return nil, nil + } + return t.Time, nil +} + +// NewTime creates a new Time. +func NewTime(t time.Time, valid bool) Time { + return Time{ + NullTime: sql.NullTime{ + Time: t, + Valid: valid, + }, + } +} + +// TimeFrom creates a new Time that will always be valid. +func TimeFrom(t time.Time) Time { + return NewTime(t, true) +} + +// TimeFromPtr creates a new Time that will be null if t is nil. +func TimeFromPtr(t *time.Time) Time { + if t == nil { + return NewTime(time.Time{}, false) + } + return NewTime(*t, true) +} + +// ValueOrZero returns the inner value if valid, otherwise zero. +func (t Time) ValueOrZero() time.Time { + if !t.Valid { + return time.Time{} + } + return t.Time +} + +// MarshalJSON implements json.Marshaler. +// It will encode null if this time is null. +func (t Time) MarshalJSON() ([]byte, error) { + if !t.Valid { + return []byte("null"), nil + } + return t.Time.MarshalJSON() +} + +// UnmarshalJSON implements json.Unmarshaler. +// It supports string and null input. +func (t *Time) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, nullBytes) { + t.Valid = false + return nil + } + + if err := json.Unmarshal(data, &t.Time); err != nil { + return fmt.Errorf("null: couldn't unmarshal JSON: %w", err) + } + + t.Valid = true + return nil +} + +// MarshalText implements encoding.TextMarshaler. +// It returns an empty string if invalid, otherwise time.Time's MarshalText. +func (t Time) MarshalText() ([]byte, error) { + if !t.Valid { + return []byte{}, nil + } + return t.Time.MarshalText() +} + +// UnmarshalText implements encoding.TextUnmarshaler. +// It has backwards compatibility with v3 in that the string "null" is considered equivalent to an empty string +// and unmarshaling will succeed. This may be removed in a future version. +func (t *Time) UnmarshalText(text []byte) error { + str := string(text) + // allowing "null" is for backwards compatibility with v3 + if str == "" || str == "null" { + t.Valid = false + return nil + } + if err := t.Time.UnmarshalText(text); err != nil { + return fmt.Errorf("null: couldn't unmarshal text: %w", err) + } + t.Valid = true + return nil +} + +// SetValid changes this Time's value and sets it to be non-null. +func (t *Time) SetValid(v time.Time) { + t.Time = v + t.Valid = true +} + +// Ptr returns a pointer to this Time's value, or a nil pointer if this Time is null. 
+func (t Time) Ptr() *time.Time { + if !t.Valid { + return nil + } + return &t.Time +} + +// IsZero returns true for invalid Times, hopefully for future omitempty support. +// A non-null Time with a zero value will not be considered zero. +func (t Time) IsZero() bool { + return !t.Valid +} + +// Equal returns true if both Time objects encode the same time or are both null. +// Two times can be equal even if they are in different locations. +// For example, 6:00 +0200 CEST and 4:00 UTC are Equal. +func (t Time) Equal(other Time) bool { + return t.Valid == other.Valid && (!t.Valid || t.Time.Equal(other.Time)) +} + +// ExactEqual returns true if both Time objects are equal or both null. +// ExactEqual returns false for times that are in different locations or +// have a different monotonic clock reading. +func (t Time) ExactEqual(other Time) bool { + return t.Valid == other.Valid && (!t.Valid || t.Time == other.Time) +} diff --git a/vendor/gopkg.in/guregu/null.v4/zero/bool.go b/vendor/gopkg.in/guregu/null.v4/zero/bool.go new file mode 100644 index 000000000..0612287b4 --- /dev/null +++ b/vendor/gopkg.in/guregu/null.v4/zero/bool.go @@ -0,0 +1,123 @@ +package zero + +import ( + "bytes" + "database/sql" + "encoding/json" + "errors" + "fmt" +) + +// Bool is a nullable bool. False input is considered null. +// JSON marshals to false if null. +// Considered null to SQL if false. +type Bool struct { + sql.NullBool +} + +// NewBool creates a new Bool +func NewBool(b bool, valid bool) Bool { + return Bool{ + NullBool: sql.NullBool{ + Bool: b, + Valid: valid, + }, + } +} + +// BoolFrom creates a new Bool that will be null if false. +func BoolFrom(b bool) Bool { + return NewBool(b, b) +} + +// BoolFromPtr creates a new Bool that will be null if b is nil. +func BoolFromPtr(b *bool) Bool { + if b == nil { + return NewBool(false, false) + } + return NewBool(*b, true) +} + +// ValueOrZero returns the inner value if valid, otherwise false. +func (b Bool) ValueOrZero() bool { + return b.Valid && b.Bool +} + +// UnmarshalJSON implements json.Unmarshaler. +// "false" will be considered a null Bool. +func (b *Bool) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, nullBytes) { + b.Valid = false + return nil + } + + if err := json.Unmarshal(data, &b.Bool); err != nil { + return fmt.Errorf("zero: couldn't unmarshal JSON: %w", err) + } + + b.Valid = b.Bool + return nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +// It will unmarshal to a null Bool if the input is false or blank. +// It will return an error if the input is not a boolean, blank, or "null". +func (b *Bool) UnmarshalText(text []byte) error { + str := string(text) + switch str { + case "", "null": + b.Valid = false + return nil + case "true": + b.Bool = true + b.Valid = true + return nil + case "false": + b.Bool = false + b.Valid = false + return nil + } + return errors.New("invalid input:" + str) +} + +// MarshalJSON implements json.Marshaler. +// It will encode false if this Bool is null. +func (b Bool) MarshalJSON() ([]byte, error) { + if !b.Valid || !b.Bool { + return []byte("false"), nil + } + return []byte("true"), nil +} + +// MarshalText implements encoding.TextMarshaler. +// It will encode false if this Bool is null. +func (b Bool) MarshalText() ([]byte, error) { + if !b.Valid || !b.Bool { + return []byte("false"), nil + } + return []byte("true"), nil +} + +// SetValid changes this Bool's value and also sets it to be non-null.
diff --git a/vendor/gopkg.in/guregu/null.v4/zero/bool.go b/vendor/gopkg.in/guregu/null.v4/zero/bool.go
new file mode 100644
index 000000000..0612287b4
--- /dev/null
+++ b/vendor/gopkg.in/guregu/null.v4/zero/bool.go
@@ -0,0 +1,123 @@
+package zero
+
+import (
+	"bytes"
+	"database/sql"
+	"encoding/json"
+	"errors"
+	"fmt"
+)
+
+// Bool is a nullable bool. False input is considered null.
+// JSON marshals to false if null.
+// Considered null to SQL if false.
+type Bool struct {
+	sql.NullBool
+}
+
+// NewBool creates a new Bool.
+func NewBool(b bool, valid bool) Bool {
+	return Bool{
+		NullBool: sql.NullBool{
+			Bool:  b,
+			Valid: valid,
+		},
+	}
+}
+
+// BoolFrom creates a new Bool that will be null if false.
+func BoolFrom(b bool) Bool {
+	return NewBool(b, b)
+}
+
+// BoolFromPtr creates a new Bool that will be null if b is nil.
+func BoolFromPtr(b *bool) Bool {
+	if b == nil {
+		return NewBool(false, false)
+	}
+	return NewBool(*b, true)
+}
+
+// ValueOrZero returns the inner value if valid, otherwise false.
+func (b Bool) ValueOrZero() bool {
+	return b.Valid && b.Bool
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+// "false" will be considered a null Bool.
+func (b *Bool) UnmarshalJSON(data []byte) error {
+	if bytes.Equal(data, nullBytes) {
+		b.Valid = false
+		return nil
+	}
+
+	if err := json.Unmarshal(data, &b.Bool); err != nil {
+		return fmt.Errorf("zero: couldn't unmarshal JSON: %w", err)
+	}
+
+	b.Valid = b.Bool
+	return nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+// It will unmarshal to a null Bool if the input is false or blank.
+// It will return an error if the input is not a bool, blank, or "null".
+func (b *Bool) UnmarshalText(text []byte) error {
+	str := string(text)
+	switch str {
+	case "", "null":
+		b.Valid = false
+		return nil
+	case "true":
+		b.Bool = true
+		b.Valid = true
+		return nil
+	case "false":
+		b.Bool = false
+		b.Valid = false
+		return nil
+	}
+	return errors.New("invalid input: " + str)
+}
+
+// MarshalJSON implements json.Marshaler.
+// It will encode false if this Bool is null.
+func (b Bool) MarshalJSON() ([]byte, error) {
+	if !b.Valid || !b.Bool {
+		return []byte("false"), nil
+	}
+	return []byte("true"), nil
+}
+
+// MarshalText implements encoding.TextMarshaler.
+// It will encode false if this Bool is null.
+func (b Bool) MarshalText() ([]byte, error) {
+	if !b.Valid || !b.Bool {
+		return []byte("false"), nil
+	}
+	return []byte("true"), nil
+}
+
+// SetValid changes this Bool's value and also sets it to be non-null.
+func (b *Bool) SetValid(v bool) {
+	b.Bool = v
+	b.Valid = true
+}
+
+// Ptr returns a pointer to this Bool's value, or a nil pointer if this Bool is null.
+func (b Bool) Ptr() *bool {
+	if !b.Valid {
+		return nil
+	}
+	return &b.Bool
+}
+
+// IsZero returns true for null or zero Bools, for potential future omitempty support.
+func (b Bool) IsZero() bool {
+	return !b.Valid || !b.Bool
+}
+
+// Equal returns true if both Bools are true and valid, or if both are either false or invalid.
+func (b Bool) Equal(other Bool) bool {
+	return b.ValueOrZero() == other.ValueOrZero()
+}
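The zero.Bool semantics are easy to get wrong at call sites, since false and null collapse into each other on both the JSON and SQL sides; a small sketch of the behavior (illustrative only):

```go
package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/guregu/null.v4/zero"
)

func main() {
	// BoolFrom(false) produces an invalid (null) Bool...
	b := zero.BoolFrom(false)
	fmt.Println(b.Valid) // false

	// ...which still marshals to false, never to JSON null.
	out, _ := json.Marshal(b)
	fmt.Println(string(out)) // false

	// Unmarshaling JSON null and JSON false are equivalent.
	var x, y zero.Bool
	_ = json.Unmarshal([]byte("null"), &x)
	_ = json.Unmarshal([]byte("false"), &y)
	fmt.Println(x.Equal(y)) // true
}
```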
diff --git a/vendor/gopkg.in/guregu/null.v4/zero/float.go b/vendor/gopkg.in/guregu/null.v4/zero/float.go
new file mode 100644
index 000000000..d0a79ed66
--- /dev/null
+++ b/vendor/gopkg.in/guregu/null.v4/zero/float.go
@@ -0,0 +1,157 @@
+package zero
+
+import (
+	"bytes"
+	"database/sql"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"strconv"
+)
+
+// Float is a nullable float64. Zero input will be considered null.
+// JSON marshals to zero if null.
+// Considered null to SQL if zero.
+type Float struct {
+	sql.NullFloat64
+}
+
+// NewFloat creates a new Float.
+func NewFloat(f float64, valid bool) Float {
+	return Float{
+		NullFloat64: sql.NullFloat64{
+			Float64: f,
+			Valid:   valid,
+		},
+	}
+}
+
+// FloatFrom creates a new Float that will be null if zero.
+func FloatFrom(f float64) Float {
+	return NewFloat(f, f != 0)
+}
+
+// FloatFromPtr creates a new Float that will be null if f is nil.
+func FloatFromPtr(f *float64) Float {
+	if f == nil {
+		return NewFloat(0, false)
+	}
+	return NewFloat(*f, true)
+}
+
+// ValueOrZero returns the inner value if valid, otherwise zero.
+func (f Float) ValueOrZero() float64 {
+	if !f.Valid {
+		return 0
+	}
+	return f.Float64
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+// It supports number and null input.
+// 0 will be considered a null Float.
+func (f *Float) UnmarshalJSON(data []byte) error {
+	if bytes.Equal(data, nullBytes) {
+		f.Valid = false
+		return nil
+	}
+
+	if err := json.Unmarshal(data, &f.Float64); err != nil {
+		var typeError *json.UnmarshalTypeError
+		if errors.As(err, &typeError) {
+			// special case: accept string input
+			if typeError.Value != "string" {
+				return fmt.Errorf("zero: JSON input is invalid type (need float or string): %w", err)
+			}
+			var str string
+			if err := json.Unmarshal(data, &str); err != nil {
+				return fmt.Errorf("zero: couldn't unmarshal number string: %w", err)
+			}
+			n, err := strconv.ParseFloat(str, 64)
+			if err != nil {
+				return fmt.Errorf("zero: couldn't convert string to float: %w", err)
+			}
+			f.Float64 = n
+			f.Valid = n != 0
+			return nil
+		}
+		return fmt.Errorf("zero: couldn't unmarshal JSON: %w", err)
+	}
+
+	f.Valid = f.Float64 != 0
+	return nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+// It will unmarshal to a null Float if the input is blank or zero.
+// It will return an error if the input is not a float, blank, or "null".
+func (f *Float) UnmarshalText(text []byte) error {
+	str := string(text)
+	if str == "" || str == "null" {
+		f.Valid = false
+		return nil
+	}
+	var err error
+	f.Float64, err = strconv.ParseFloat(string(text), 64)
+	if err != nil {
+		return fmt.Errorf("zero: couldn't unmarshal text: %w", err)
+	}
+	f.Valid = f.Float64 != 0
+	return nil
+}
+
+// MarshalJSON implements json.Marshaler.
+// It will encode 0 if this Float is null.
+func (f Float) MarshalJSON() ([]byte, error) {
+	n := f.Float64
+	if !f.Valid {
+		n = 0
+	}
+	if math.IsInf(n, 0) || math.IsNaN(n) {
+		return nil, &json.UnsupportedValueError{
+			Value: reflect.ValueOf(f.Float64),
+			Str:   strconv.FormatFloat(f.Float64, 'g', -1, 64),
+		}
+	}
+	return []byte(strconv.FormatFloat(n, 'f', -1, 64)), nil
+}
+
+// MarshalText implements encoding.TextMarshaler.
+// It will encode a zero if this Float is null.
+func (f Float) MarshalText() ([]byte, error) {
+	n := f.Float64
+	if !f.Valid {
+		n = 0
+	}
+	return []byte(strconv.FormatFloat(n, 'f', -1, 64)), nil
+}
+
+// SetValid changes this Float's value and also sets it to be non-null.
+func (f *Float) SetValid(v float64) {
+	f.Float64 = v
+	f.Valid = true
+}
+
+// Ptr returns a pointer to this Float's value, or a nil pointer if this Float is null.
+func (f Float) Ptr() *float64 {
+	if !f.Valid {
+		return nil
+	}
+	return &f.Float64
+}
+
+// IsZero returns true for null or zero Floats, for potential future omitempty support.
+func (f Float) IsZero() bool {
+	return !f.Valid || f.Float64 == 0
+}
+
+// Equal returns true if both floats have the same value or are both either null or zero.
+// Warning: calculations involving floating-point numbers can produce results that
+// differ in their binary representation even when they are mathematically equal,
+// so this method is not suitable for comparing the results of calculations.
+// Use it only to check whether a value has changed from some previous value.
+func (f Float) Equal(other Float) bool {
+	return f.ValueOrZero() == other.ValueOrZero()
+}
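A sketch of the two UnmarshalJSON paths shown above: plain numbers and quoted number strings are both accepted, and zero collapses to null either way (illustrative only):

```go
package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/guregu/null.v4/zero"
)

func main() {
	var f zero.Float

	// Quoted numeric strings take the string fallback path.
	_ = json.Unmarshal([]byte(`"1.5"`), &f)
	fmt.Println(f.Float64, f.Valid) // 1.5 true

	// Zero input is valid JSON but yields a null Float...
	_ = json.Unmarshal([]byte(`0`), &f)
	fmt.Println(f.Valid) // false

	// ...which still marshals back out as 0, never as null.
	b, _ := json.Marshal(f)
	fmt.Println(string(b)) // 0
}
```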
diff --git a/vendor/gopkg.in/guregu/null.v4/zero/int.go b/vendor/gopkg.in/guregu/null.v4/zero/int.go
new file mode 100644
index 000000000..f7092ffd8
--- /dev/null
+++ b/vendor/gopkg.in/guregu/null.v4/zero/int.go
@@ -0,0 +1,146 @@
+package zero
+
+import (
+	"bytes"
+	"database/sql"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+)
+
+// Int is a nullable int64.
+// JSON marshals to zero if null.
+// Considered null to SQL if zero.
+type Int struct {
+	sql.NullInt64
+}
+
+// NewInt creates a new Int.
+func NewInt(i int64, valid bool) Int {
+	return Int{
+		NullInt64: sql.NullInt64{
+			Int64: i,
+			Valid: valid,
+		},
+	}
+}
+
+// IntFrom creates a new Int that will be null if zero.
+func IntFrom(i int64) Int {
+	return NewInt(i, i != 0)
+}
+
+// IntFromPtr creates a new Int that will be null if i is nil.
+func IntFromPtr(i *int64) Int {
+	if i == nil {
+		return NewInt(0, false)
+	}
+	n := NewInt(*i, true)
+	return n
+}
+
+// ValueOrZero returns the inner value if valid, otherwise zero.
+func (i Int) ValueOrZero() int64 {
+	if !i.Valid {
+		return 0
+	}
+	return i.Int64
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+// It supports number and null input.
+// 0 will be considered a null Int.
+func (i *Int) UnmarshalJSON(data []byte) error {
+	if bytes.Equal(data, nullBytes) {
+		i.Valid = false
+		return nil
+	}
+
+	if err := json.Unmarshal(data, &i.Int64); err != nil {
+		var typeError *json.UnmarshalTypeError
+		if errors.As(err, &typeError) {
+			// special case: accept string input
+			if typeError.Value != "string" {
+				return fmt.Errorf("zero: JSON input is invalid type (need int or string): %w", err)
+			}
+			var str string
+			if err := json.Unmarshal(data, &str); err != nil {
+				return fmt.Errorf("zero: couldn't unmarshal number string: %w", err)
+			}
+			n, err := strconv.ParseInt(str, 10, 64)
+			if err != nil {
+				return fmt.Errorf("zero: couldn't convert string to int: %w", err)
+			}
+			i.Int64 = n
+			i.Valid = n != 0
+			return nil
+		}
+		return fmt.Errorf("zero: couldn't unmarshal JSON: %w", err)
+	}
+
+	i.Valid = i.Int64 != 0
+	return nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+// It will unmarshal to a null Int if the input is blank or zero.
+// It will return an error if the input is not an integer, blank, or "null".
+func (i *Int) UnmarshalText(text []byte) error {
+	str := string(text)
+	if str == "" || str == "null" {
+		i.Valid = false
+		return nil
+	}
+	var err error
+	i.Int64, err = strconv.ParseInt(string(text), 10, 64)
+	if err != nil {
+		return fmt.Errorf("zero: couldn't unmarshal text: %w", err)
+	}
+	i.Valid = i.Int64 != 0
+	return nil
+}
+
+// MarshalJSON implements json.Marshaler.
+// It will encode 0 if this Int is null.
+func (i Int) MarshalJSON() ([]byte, error) {
+	n := i.Int64
+	if !i.Valid {
+		n = 0
+	}
+	return []byte(strconv.FormatInt(n, 10)), nil
+}
+
+// MarshalText implements encoding.TextMarshaler.
+// It will encode a zero if this Int is null.
+func (i Int) MarshalText() ([]byte, error) {
+	n := i.Int64
+	if !i.Valid {
+		n = 0
+	}
+	return []byte(strconv.FormatInt(n, 10)), nil
+}
+
+// SetValid changes this Int's value and also sets it to be non-null.
+func (i *Int) SetValid(n int64) {
+	i.Int64 = n
+	i.Valid = true
+}
+
+// Ptr returns a pointer to this Int's value, or a nil pointer if this Int is null.
+func (i Int) Ptr() *int64 {
+	if !i.Valid {
+		return nil
+	}
+	return &i.Int64
+}
+
+// IsZero returns true for null or zero Ints, for potential future omitempty support.
+func (i Int) IsZero() bool {
+	return !i.Valid || i.Int64 == 0
+}
+
+// Equal returns true if both ints have the same value or are both either null or zero.
+func (i Int) Equal(other Int) bool {
+	return i.ValueOrZero() == other.ValueOrZero()
+}
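Because Int embeds sql.NullInt64, it scans SQL NULLs directly; a sketch of how a NULL column, a stored zero, and Ptr interact (illustrative only):

```go
package main

import (
	"fmt"

	"gopkg.in/guregu/null.v4/zero"
)

func main() {
	// Scanning a SQL NULL uses the embedded sql.NullInt64.Scan.
	var i zero.Int
	_ = i.Scan(nil)
	fmt.Println(i.Valid, i.ValueOrZero()) // false 0

	// In the zero package a NULL and a stored 0 are interchangeable.
	fmt.Println(i.Equal(zero.IntFrom(0))) // true

	// Ptr maps null to a nil *int64, useful for optional API fields.
	fmt.Println(i.Ptr() == nil) // true
}
```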
diff --git a/vendor/gopkg.in/guregu/null.v4/zero/string.go b/vendor/gopkg.in/guregu/null.v4/zero/string.go
new file mode 100644
index 000000000..db1c832a9
--- /dev/null
+++ b/vendor/gopkg.in/guregu/null.v4/zero/string.go
@@ -0,0 +1,111 @@
+// Package zero contains SQL types that consider zero input and null input to be equivalent
+// with convenient support for JSON and text marshaling.
+// Types in this package will JSON marshal to their zero value, even if null.
+// Use the null parent package if you don't want this.
+package zero
+
+import (
+	"bytes"
+	"database/sql"
+	"encoding/json"
+	"fmt"
+)
+
+// nullBytes is a JSON null literal
+var nullBytes = []byte("null")
+
+// String is a nullable string.
+// JSON marshals to a blank string if null.
+// Considered null to SQL if blank.
+type String struct {
+	sql.NullString
+}
+
+// NewString creates a new String.
+func NewString(s string, valid bool) String {
+	return String{
+		NullString: sql.NullString{
+			String: s,
+			Valid:  valid,
+		},
+	}
+}
+
+// StringFrom creates a new String that will be null if s is blank.
+func StringFrom(s string) String {
+	return NewString(s, s != "")
+}
+
+// StringFromPtr creates a new String that will be null if s is nil or blank.
+// The value is copied; the returned String does not retain the pointer.
+func StringFromPtr(s *string) String {
+	if s == nil {
+		return NewString("", false)
+	}
+	return NewString(*s, *s != "")
+}
+
+// ValueOrZero returns the inner value if valid, otherwise zero.
+func (s String) ValueOrZero() string {
+	if !s.Valid {
+		return ""
+	}
+	return s.String
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+// It supports string and null input. Blank string input produces a null String.
+func (s *String) UnmarshalJSON(data []byte) error {
+	if bytes.Equal(data, nullBytes) {
+		s.Valid = false
+		return nil
+	}
+
+	if err := json.Unmarshal(data, &s.String); err != nil {
+		return fmt.Errorf("zero: couldn't unmarshal JSON: %w", err)
+	}
+
+	s.Valid = s.String != ""
+	return nil
+}
+
+// MarshalText implements encoding.TextMarshaler.
+// It will encode a blank string when this String is null.
+func (s String) MarshalText() ([]byte, error) {
+	if !s.Valid {
+		return []byte{}, nil
+	}
+	return []byte(s.String), nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+// It will unmarshal to a null String if the input is a blank string.
+func (s *String) UnmarshalText(text []byte) error {
+	s.String = string(text)
+	s.Valid = s.String != ""
+	return nil
+}
+
+// SetValid changes this String's value and also sets it to be non-null.
+func (s *String) SetValid(v string) {
+	s.String = v
+	s.Valid = true
+}
+
+// Ptr returns a pointer to this String's value, or a nil pointer if this String is null.
+func (s String) Ptr() *string {
+	if !s.Valid {
+		return nil
+	}
+	return &s.String
+}
+
+// IsZero returns true for null or empty strings, for potential future omitempty support.
+func (s String) IsZero() bool {
+	return !s.Valid || s.String == ""
+}
+
+// Equal returns true if both strings have the same value or are both either null or empty.
+func (s String) Equal(other String) bool {
+	return s.ValueOrZero() == other.ValueOrZero()
+}
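The package comment above is the key difference from the parent package; a one-screen comparison of how the same null value marshals in each (a sketch, not code from this change):

```go
package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/guregu/null.v4"
	"gopkg.in/guregu/null.v4/zero"
)

func main() {
	// The same invalid value marshals differently in the two packages:
	// the null package preserves null, the zero package flattens to "".
	n, _ := json.Marshal(null.String{}) // via null.String's MarshalJSON
	z, _ := json.Marshal(zero.String{}) // via zero.String's MarshalText
	fmt.Println(string(n), string(z))   // null ""
}
```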
diff --git a/vendor/gopkg.in/guregu/null.v4/zero/time.go b/vendor/gopkg.in/guregu/null.v4/zero/time.go
new file mode 100644
index 000000000..0e3a55c4b
--- /dev/null
+++ b/vendor/gopkg.in/guregu/null.v4/zero/time.go
@@ -0,0 +1,146 @@
+package zero
+
+import (
+	"database/sql"
+	"database/sql/driver"
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+// Time is a nullable time.Time.
+// JSON marshals to the zero value for time.Time if null.
+// Considered null to SQL if zero.
+type Time struct {
+	sql.NullTime
+}
+
+// Value implements the driver Valuer interface.
+func (t Time) Value() (driver.Value, error) {
+	if !t.Valid {
+		return nil, nil
+	}
+	return t.Time, nil
+}
+
+// NewTime creates a new Time.
+func NewTime(t time.Time, valid bool) Time {
+	return Time{
+		NullTime: sql.NullTime{
+			Time:  t,
+			Valid: valid,
+		},
+	}
+}
+
+// TimeFrom creates a new Time that will
+// be null if t is the zero value.
+func TimeFrom(t time.Time) Time {
+	return NewTime(t, !t.IsZero())
+}
+
+// TimeFromPtr creates a new Time that will
+// be null if t is nil or *t is the zero value.
+func TimeFromPtr(t *time.Time) Time {
+	if t == nil {
+		return NewTime(time.Time{}, false)
+	}
+	return TimeFrom(*t)
+}
+
+// ValueOrZero returns the inner value if valid, otherwise zero.
+func (t Time) ValueOrZero() time.Time {
+	if !t.Valid {
+		return time.Time{}
+	}
+	return t.Time
+}
+
+// MarshalJSON implements json.Marshaler.
+// It will encode the zero value of time.Time
+// if this time is invalid.
+func (t Time) MarshalJSON() ([]byte, error) {
+	if !t.Valid {
+		return (time.Time{}).MarshalJSON()
+	}
+	return t.Time.MarshalJSON()
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+// It supports string and null input.
+func (t *Time) UnmarshalJSON(data []byte) error {
+	switch string(data) {
+	case "null", `""`:
+		t.Valid = false
+		return nil
+	}
+
+	if err := json.Unmarshal(data, &t.Time); err != nil {
+		return fmt.Errorf("zero: couldn't unmarshal JSON: %w", err)
+	}
+
+	t.Valid = !t.Time.IsZero()
+	return nil
+}
+
+// MarshalText implements encoding.TextMarshaler.
+// It will encode an empty time.Time if invalid.
+func (t Time) MarshalText() ([]byte, error) {
+	ti := t.Time
+	if !t.Valid {
+		ti = time.Time{}
+	}
+	return ti.MarshalText()
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+// It is compatible with the null package in that it accepts empty strings as invalid values,
+// which unmarshal to an invalid zero value.
+func (t *Time) UnmarshalText(text []byte) error {
+	str := string(text)
+	// allowing "null" is for backwards compatibility with v3
+	if str == "" || str == "null" {
+		t.Valid = false
+		return nil
+	}
+	if err := t.Time.UnmarshalText(text); err != nil {
+		return fmt.Errorf("zero: couldn't unmarshal text: %w", err)
+	}
+	t.Valid = !t.Time.IsZero()
+	return nil
+}
+
+// SetValid changes this Time's value and
+// sets it to be non-null.
+func (t *Time) SetValid(v time.Time) {
+	t.Time = v
+	t.Valid = true
+}
+
+// Ptr returns a pointer to this Time's value,
+// or a nil pointer if this Time is null.
+func (t Time) Ptr() *time.Time {
+	if !t.Valid {
+		return nil
+	}
+	return &t.Time
+}
+
+// IsZero returns true for null or zero Times, for potential future omitempty support.
+func (t Time) IsZero() bool {
+	return !t.Valid || t.Time.IsZero()
+}
+
+// Equal returns true if both Time objects encode the same time or are both either null or zero.
+// Two times can be equal even if they are in different locations.
+// For example, 6:00 +0200 CEST and 4:00 UTC are Equal.
+func (t Time) Equal(other Time) bool {
+	return t.ValueOrZero().Equal(other.ValueOrZero())
+}
+
+// ExactEqual returns true if both Time objects are equal or are both either null or zero.
+// ExactEqual returns false for times that are in different locations or
+// have a different monotonic clock reading.
+func (t Time) ExactEqual(other Time) bool { + return t.ValueOrZero() == other.ValueOrZero() +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 050db3412..549279671 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -129,6 +129,17 @@ github.com/davecgh/go-spew/spew # github.com/disintegration/imaging v1.6.0 ## explicit github.com/disintegration/imaging +# github.com/doug-martin/goqu/v9 v9.18.0 +## explicit; go 1.12 +github.com/doug-martin/goqu/v9 +github.com/doug-martin/goqu/v9/dialect/sqlite3 +github.com/doug-martin/goqu/v9/exec +github.com/doug-martin/goqu/v9/exp +github.com/doug-martin/goqu/v9/internal/errors +github.com/doug-martin/goqu/v9/internal/sb +github.com/doug-martin/goqu/v9/internal/tag +github.com/doug-martin/goqu/v9/internal/util +github.com/doug-martin/goqu/v9/sqlgen # github.com/fsnotify/fsnotify v1.5.1 ## explicit; go 1.13 github.com/fsnotify/fsnotify @@ -249,7 +260,7 @@ github.com/matryer/moq github.com/matryer/moq/internal/registry github.com/matryer/moq/internal/template github.com/matryer/moq/pkg/moq -# github.com/mattn/go-sqlite3 v1.14.6 +# github.com/mattn/go-sqlite3 v1.14.7 ## explicit; go 1.12 github.com/mattn/go-sqlite3 # github.com/mitchellh/go-homedir v1.1.0 @@ -366,6 +377,10 @@ github.com/urfave/cli/v2 # github.com/vearutop/statigz v1.1.6 ## explicit; go 1.16 github.com/vearutop/statigz +# github.com/vektah/dataloaden v0.3.0 +## explicit +github.com/vektah/dataloaden +github.com/vektah/dataloaden/pkg/generator # github.com/vektah/gqlparser/v2 v2.4.1 ## explicit; go 1.16 github.com/vektah/gqlparser/v2 @@ -401,12 +416,12 @@ golang.org/x/image/tiff/lzw golang.org/x/image/vp8 golang.org/x/image/vp8l golang.org/x/image/webp -# golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 +# golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 ## explicit; go 1.17 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9 +# golang.org/x/net v0.0.0-20220722155237-a158d28d115b ## explicit; go 1.17 golang.org/x/net/bpf golang.org/x/net/context/ctxhttp @@ -417,7 +432,7 @@ golang.org/x/net/internal/iana golang.org/x/net/internal/socket golang.org/x/net/ipv4 golang.org/x/net/publicsuffix -# golang.org/x/sys v0.0.0-20220329152356-43be30ef3008 +# golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/execabs @@ -453,12 +468,13 @@ golang.org/x/text/language golang.org/x/text/runes golang.org/x/text/transform golang.org/x/text/unicode/norm -# golang.org/x/tools v0.1.10 -## explicit; go 1.17 +# golang.org/x/tools v0.1.12 +## explicit; go 1.18 golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/internal/gcimporter golang.org/x/tools/go/internal/packagesdriver +golang.org/x/tools/go/internal/pkgbits golang.org/x/tools/go/packages golang.org/x/tools/imports golang.org/x/tools/internal/event @@ -472,10 +488,12 @@ golang.org/x/tools/internal/imports golang.org/x/tools/internal/packagesinternal golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal -# golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 -## explicit; go 1.11 -golang.org/x/xerrors -golang.org/x/xerrors/internal +# golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f +## explicit; go 1.17 +# gopkg.in/guregu/null.v4 v4.0.0 +## explicit +gopkg.in/guregu/null.v4 +gopkg.in/guregu/null.v4/zero # gopkg.in/ini.v1 v1.66.4 ## explicit gopkg.in/ini.v1
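Finally, a sketch of the zero/time.go equality semantics added in this diff, where a null Time and a zero-valued Time are interchangeable (illustrative only):

```go
package main

import (
	"fmt"
	"time"

	"gopkg.in/guregu/null.v4/zero"
)

func main() {
	// A zero time.Time and an unset Time compare as Equal.
	var unset zero.Time
	zeroed := zero.TimeFrom(time.Time{}) // zero value => invalid
	fmt.Println(zeroed.Valid, unset.Equal(zeroed)) // false true

	// Equal compares instants; ExactEqual also requires identical
	// wall clock and location.
	utc := zero.TimeFrom(time.Date(2022, 8, 1, 4, 0, 0, 0, time.UTC))
	cest := zero.TimeFrom(utc.Time.In(time.FixedZone("CEST", 2*60*60)))
	fmt.Println(utc.Equal(cest))      // true
	fmt.Println(utc.ExactEqual(cest)) // false
}
```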